Skip to content
Snippets Groups Projects
Commit edbf2668 authored by Angel Santamaria-Navarro's avatar Angel Santamaria-Navarro
Browse files

WIP: adding trifocaltensor test

parent 388ec981
No related branches found
No related tags found
No related merge requests found
......@@ -33,7 +33,17 @@ void TrifocalTensor::print() const
}
}
// Unpack the 27 coefficients of tensorVector into the 3x3x3 tensor_ array.
// The flat layout is idx = jj + 3*ii + 9*kk (jj fastest, kk slowest), which
// this version decodes from a single flat loop instead of three nested ones.
void TrifocalTensor::fillTensor(const Eigen::VectorXd& tensorVector)
{
  for (size_t idx = 0; idx < 27; ++idx) {
    const size_t jj = idx % 3;        // fastest-varying index
    const size_t ii = (idx / 3) % 3;  // middle index
    const size_t kk = idx / 9;        // slowest-varying index
    tensor_[ii][jj][kk] = tensorVector(idx);
  }
}
void TrifocalTensor::computeTensor(const Eigen::MatrixXd& list1, const Eigen::MatrixXd& list2, const Eigen::MatrixXd& list3)
{
Eigen::MatrixXd A;
......@@ -80,16 +90,6 @@ Eigen::VectorXd TrifocalTensor::resolveEquationWithSVD(Eigen::MatrixXd& A, size_
return tensorVector;
}
// Unpack the 27 coefficients of tensorVector into the 3x3x3 tensor_ array.
// Flat layout: index = jj + 3*ii + 9*kk (jj fastest, kk slowest).
void TrifocalTensor::fillTensor(const Eigen::VectorXd& tensorVector)
{
for(size_t kk = 0; kk<3; ++kk) {
for(size_t ii = 0; ii<3; ++ii) {
for(size_t jj = 0; jj<3; ++jj) {
// NOTE: tensor_ is float while tensorVector is double — implicit narrowing here.
tensor_[ii][jj][kk] = tensorVector(jj + ii*3 + kk*9);
}
}
}
}
void TrifocalTensor::computeMatrixB(const float* p1, const float* p2, Eigen::MatrixXd& B, Eigen::VectorXd& b)
{
......
......@@ -3,6 +3,7 @@
#include <Eigen/Dense>
// vision utils includes
#include "../vision_utils.h"
namespace vision_utils {
......@@ -30,6 +31,11 @@ public:
*/
void computeTensor(const Eigen::MatrixXd& list1, const Eigen::MatrixXd& list2, const Eigen::MatrixXd& list3);
/**
* \brief Compute the coordinates of point p3 in the third image corresponding to p1 and p2 in the other two images.
*
* \param p1 point in the first image (assumed 2D float array — TODO confirm against the implementation)
* \param p2 point in the second image
* \param p3 output: transferred point in the third image
*/
void transfert(const float* p1 , const float* p2, float* p3);
private:
float tensor_[3][3][3]; // Main Tensor matrix (3x3x3)
......@@ -59,11 +65,6 @@ private:
*/
void computeMatrixB(const float* p1, const float* p2, Eigen::MatrixXd& B, Eigen::VectorXd& b);
/**
* \brief Compute the coordinates of point p3 in the third image corresponding to p1 and p2 in the other two images.
*/
void transfert(const float* p1 , const float* p2, float* p3);
/**
* \brief Display the tensor, just for debug
*/
......
......@@ -28,6 +28,10 @@ target_link_libraries(gtest_example ${PROJECT_NAME}) #
# Frame test
vu_add_gtest(gtest_frame gtest_frame.cpp)
target_link_libraries(gtest_frame ${PROJECT_NAME})
# TrifocalTensor test
vu_add_gtest(gtest_trifocaltensor gtest_trifocaltensor.cpp)
target_link_libraries(gtest_trifocaltensor ${PROJECT_NAME})
# Vision Utils test
vu_add_gtest(gtest_vision_utils gtest_vision_utils.cpp)
target_link_libraries(gtest_vision_utils ${PROJECT_NAME})
......
src/test/data/img1.jpg

66.5 KiB

src/test/data/img2.jpg

68.2 KiB

src/test/data/img3.jpg

68.2 KiB

#include "utils_gtest.h"
// vision utils includes
#include "../vision_utils.h"
#include "../detectors.h"
#include "../descriptors.h"
#include "../matchers.h"
#include "../common_class/trifocaltensor.h"
// Test fixtures shared by the test cases below.
cv::Mat image1, image2, image3;
// _VU_ROOT_DIR is presumably a compile-time define pointing at the source tree — confirm in CMake.
std::string vu_root = _VU_ROOT_DIR;
std::string filename = vu_root + "/src/test/data/img1.jpg";
std::string filename2 = vu_root + "/src/test/data/img2.jpg";
std::string filename3 = vu_root + "/src/test/data/img3.jpg";
/**
 * \brief Slide a small ROI over the image at three fixed vertical positions,
 * detect the single best keypoint in each ROI, and collect those that match
 * one of the expected points.
 *
 * \param _image image to scan.
 * \param _det_ptr detector to use; must actually be an ORB detector.
 * \param _det_name detector name (currently unused inside this helper).
 * \param _points_to_check expected keypoint positions to match against.
 * \return map from the index of the matched expected point to the detected
 *         position; empty (or partial) on failure.
 */
std::map<int, cv::Point2f> TestDetectorROI(const cv::Mat& _image, vision_utils::DetectorBasePtr _det_ptr, const std::string& _det_name, const PointVector& _points_to_check)
{
    std::map<int, cv::Point2f> points_found;

    // BUGFIX: static_pointer_cast never returns null for a non-null input, so
    // the original NULL check could not detect a wrong detector type.
    // dynamic_pointer_cast actually verifies the dynamic type is DetectorORB.
    if (std::dynamic_pointer_cast<vision_utils::DetectorORB>(_det_ptr) == nullptr)
        return points_found;

    const int img_width = _image.cols; // int (cv::Mat::cols is int) — avoids signed/unsigned comparison

    std::vector<cv::KeyPoint> target_keypoints;
    const int roi_width  = 50;
    const int roi_height = 50; // typo fixed: was "roi_heigth"

    Eigen::VectorXi roi_center_y(3); roi_center_y << 102 , 250 , 476;

    for (int ii = 0; ii < roi_center_y.size(); ii++)
    {
        // Center the ROI vertically on roi_center_y. BUGFIX(semantics): use the
        // height, not the width, for the vertical offset (same value here since
        // width == height == 50, so behavior is unchanged).
        const int roi_y = roi_center_y(ii) - roi_height/2;

        for (int roi_x = 0; roi_x < img_width; roi_x += 5)
        {
            cv::Rect roi(roi_x, roi_y, roi_width, roi_height);
            cv::Rect roi_inflated = roi;

            // Detect features in ROI
            target_keypoints = _det_ptr->detect(_image, roi_inflated);

            // Keep only the single best keypoint in the ROI
            if (!target_keypoints.empty())
            {
                vision_utils::retainBest(target_keypoints,1);
                cv::Point2f pnt = target_keypoints[0].pt;
                std::cout << "****************" << std::endl;
                int j = vision_utils::existsInPVec(pnt, _points_to_check, 2.0);
                // append the keypoint to the list of keypoints found
                if (j >= 0)
                {
                    points_found[j] = pnt;
                    std::cout << points_found[j] << std::endl;
                }
                else
                    // BUGFIX: the original printed points_found[j] before this
                    // check; with j == -1, map::operator[] default-inserted a
                    // spurious {-1, (0,0)} entry into the returned map.
                    return points_found;
            }
#ifdef _VU_DEBUG
            cv::Mat image_graphics = _image.clone();
            cv::drawKeypoints(image_graphics,target_keypoints,image_graphics);
            cv::rectangle(image_graphics, roi, cv::Scalar(255.0, 0.0, 255.0), 1, 8, 0);
            cv::rectangle(image_graphics, roi_inflated, cv::Scalar(255.0, 255.0, 0.0), 1, 8, 0);
            cv::imshow("test",image_graphics);
            cv::waitKey(1);
#endif
        }
    }
    return points_found;
}
// Sanity check: the three data images used by the other test cases can be
// loaded from disk (each ASSERT aborts this test with the failing path).
TEST(TrifocalTensor, LoadImageFromFile)
{
image1 = cv::imread(filename, CV_LOAD_IMAGE_GRAYSCALE);
ASSERT_TRUE(image1.data)<< "Failed to load image " << filename << std::endl;
image2 = cv::imread(filename2, CV_LOAD_IMAGE_GRAYSCALE);
ASSERT_TRUE(image2.data)<< "Failed to load image " << filename2 << std::endl;
image3 = cv::imread(filename3, CV_LOAD_IMAGE_GRAYSCALE);
ASSERT_TRUE(image3.data)<< "Failed to load image " << filename3 << std::endl;
}
// End-to-end check of the ORB detector/descriptor plus BRUTEFORCE matcher on
// the three test images: detect, describe, match 1-2 and 2-3, filter by
// distance, and (debug) draw the filtered matches on a concatenated canvas.
TEST(TrifocalTensor, DetectorDescriptorMatcher)
{
    // Load the three grayscale test images.
    image1 = cv::imread(filename, CV_LOAD_IMAGE_GRAYSCALE);
    ASSERT_TRUE(image1.data)<< "failed to load image " << filename << std::endl;
    image2 = cv::imread(filename2, CV_LOAD_IMAGE_GRAYSCALE);
    ASSERT_TRUE(image2.data)<< "failed to load image " << filename2 << std::endl;
    image3 = cv::imread(filename3, CV_LOAD_IMAGE_GRAYSCALE);
    ASSERT_TRUE(image3.data)<< "failed to load image " << filename3 << std::endl;

    // Define detector
    vision_utils::DetectorParamsORBPtr params = std::make_shared<vision_utils::DetectorParamsORB>();
    std::string det_name = "ORB";
    vision_utils::DetectorBasePtr det_b_ptr = vision_utils::setupDetector(det_name, det_name + " detector", params);
    vision_utils::DetectorORBPtr det_ptr = std::static_pointer_cast<vision_utils::DetectorORB>(det_b_ptr);
    ASSERT_TRUE(det_ptr!=NULL);

    // Define descriptor
    vision_utils::DescriptorParamsORBPtr des_params = std::make_shared<vision_utils::DescriptorParamsORB>();
    std::string des_name = "ORB";
    vision_utils::DescriptorBasePtr des_b_ptr = vision_utils::setupDescriptor(des_name, des_name + " detector", des_params);
    vision_utils::DescriptorORBPtr des_ptr = std::static_pointer_cast<vision_utils::DescriptorORB>(des_b_ptr);
    ASSERT_TRUE(des_ptr!=NULL);

    // Detect keypoints and extract one descriptor row per keypoint.
    // Image 1
    KeyPointVector kpts1 = det_ptr->detect(image1);
    cv::Mat desc1 = des_ptr->getDescriptor(image1,kpts1);
    ASSERT_TRUE(kpts1.size() == static_cast<size_t>(desc1.rows)); // cast fixes signed/unsigned comparison
    // Image 2
    KeyPointVector kpts2 = det_ptr->detect(image2);
    cv::Mat desc2 = des_ptr->getDescriptor(image2,kpts2);
    ASSERT_TRUE(kpts2.size() == static_cast<size_t>(desc2.rows));
    // Image 3
    KeyPointVector kpts3 = det_ptr->detect(image3);
    cv::Mat desc3 = des_ptr->getDescriptor(image3,kpts3);
    ASSERT_TRUE(kpts3.size() == static_cast<size_t>(desc3.rows));

    // Define matcher
    std::string matcher_name = "BRUTEFORCE";
    vision_utils::MatcherParamsBasePtr params_ptr = std::make_shared<vision_utils::MatcherParamsBase>(matcher_name);
    vision_utils::MatcherBasePtr mat_ptr;

    // MATCH type
    params_ptr->match_type = vision_utils::MATCH;
    mat_ptr = vision_utils::setupMatcher(matcher_name, matcher_name, params_ptr);
    params_ptr = mat_ptr->getParams();
    ASSERT_EQ(params_ptr->match_type,1);

    // match 1-2
    DMatchVector matches12;
    mat_ptr->match(desc1,desc2,matches12);
    ASSERT_EQ(matches12.size(),200);
    // filter
    DMatchVector best_matches12;
    KeyPointVector kpts_matched12_img1;
    KeyPointVector kpts_matched12_img2;
    mat_ptr->filterByDistance(10, 0.25, kpts1, kpts2, matches12, image1.rows, image1.cols, best_matches12, kpts_matched12_img2, kpts_matched12_img1);
    ASSERT_EQ(best_matches12.size(),19);

    // match 2-3
    DMatchVector matches23;
    mat_ptr->match(desc2,desc3,matches23);
    ASSERT_EQ(matches23.size(),200);
    // filter
    DMatchVector best_matches23;
    KeyPointVector kpts_matched23_img2;
    KeyPointVector kpts_matched23_img3;
    mat_ptr->filterByDistance(10, 0.25, kpts2, kpts3, matches23, image1.rows, image1.cols, best_matches23, kpts_matched23_img3, kpts_matched23_img2);
    ASSERT_EQ(best_matches23.size(),153);

    //#ifdef _VU_DEBUG
    // Debug rendering: concatenate the images side by side and draw matches.
    cv::Mat img_graphics(image1.rows+image2.rows+image3.rows,image1.cols+image2.cols+image3.cols,CV_8UC3);
    cv::hconcat(image1, image2, img_graphics);
    // Shift image-2 keypoints to image 2's position on the canvas.
    for (size_t ii = 0; ii < kpts_matched12_img2.size(); ++ii)
        kpts_matched12_img2[ii].pt.x = kpts_matched12_img2[ii].pt.x + image1.cols;
    for (size_t ii = 0; ii < kpts_matched12_img2.size(); ++ii)
    {
        cv::circle(img_graphics, kpts_matched12_img1[ii].pt, 5, cv::Scalar(128,128,0));
        cv::circle(img_graphics, kpts_matched12_img2[ii].pt, 5, cv::Scalar(128,128,0));
        cv::line(img_graphics, kpts_matched12_img1[ii].pt, kpts_matched12_img2[ii].pt, cv::Scalar(255,0,0));
    }
    cv::hconcat(img_graphics, image3, img_graphics);
    // Shift image-3 keypoints to image 3's position on the canvas.
    for (size_t ii = 0; ii < kpts_matched23_img3.size(); ++ii)
        kpts_matched23_img3[ii].pt.x = kpts_matched23_img3[ii].pt.x + image1.cols + image2.cols;
    // BUGFIX: this loop was bounded by kpts_matched12_img2.size() (19), a
    // copy-paste from the 1-2 block, so only 19 of the 153 filtered 2-3
    // matches were drawn. Bound it by the 2-3 keypoint vector instead.
    for (size_t ii = 0; ii < kpts_matched23_img2.size(); ++ii)
    {
        cv::circle(img_graphics, kpts_matched23_img2[ii].pt, 5, cv::Scalar(128,0,128));
        cv::circle(img_graphics, kpts_matched23_img3[ii].pt, 5, cv::Scalar(128,0,128));
        cv::line(img_graphics, kpts_matched23_img2[ii].pt, kpts_matched23_img3[ii].pt, cv::Scalar(255,0,0));
    }
    cv::imshow("test3",img_graphics);
    VU_WARN("TODO: Get matches in 3 the images only!");
    cv::waitKey(1);
    //#endif
}
// TODO(review): WIP placeholder — exercise TrifocalTensor::computeTensor()
// with matched keypoints from the three test images and check the result.
TEST(TrifocalTensor, ComputeTensor)
{
}
// Standard googletest entry point: parse gtest flags and run all tests above.
int main(int argc, char **argv)
{
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment