Skip to content
Snippets Groups Projects
Commit 4db451e8 authored by Angel Santamaria-Navarro's avatar Angel Santamaria-Navarro
Browse files

Add gtest matchers

parent 5a5d91da
No related branches found
No related tags found
No related merge requests found
......@@ -202,6 +202,7 @@ INCLUDE (${PROJECT_SOURCE_DIR}/cmake_modules/FindYamlCpp.cmake)
# Enable yaml-cpp-dependent sources when the library was located by
# FindYamlCpp.cmake; otherwise warn so the missing dependency is visible.
IF(YAMLCPP_FOUND)
    MESSAGE(STATUS "Yaml-cpp related sources will be built.")
    ADD_DEFINITIONS( -DUSING_YAML )
    SET(USING_YAML TRUE)
ELSE(YAMLCPP_FOUND)
    # BUG FIX: this branch was "ELSEIF(YAMLCPP_FOUND)", i.e. the same condition
    # as the IF above, so the warning could never be emitted when the library
    # was NOT found. It must be the ELSE branch.
    MESSAGE("[WARN] yaml-cpp Library NOT FOUND!")
ENDIF(YAMLCPP_FOUND)
......
......@@ -19,7 +19,7 @@ struct DetectorParamsAKAZE: public ParamsBase
int descriptor_size = 0; // Size of the descriptor in bits. 0 -> Full size
int descriptor_channels = 3; // Number of channels in the descriptor (1, 2, 3)
float threshold = 0.001f; // Detector response threshold to accept point
int nOctaves = 2; // Maximum octave evolution of the image
int nOctaves = 4; // Maximum octave evolution of the image
int nOctaveLayers = 4; // Default number of sublevels per scale level
int diffusivity = cv::KAZE::DIFF_PM_G2; // Diffusivity type. DIFF_PM_G1=0, DIFF_PM_G2=1, DIFF_WEICKERT=2, DIFF_CHARBONNIER=3
~DetectorParamsAKAZE(){}
......
......@@ -17,7 +17,8 @@ struct DetectorParamsFAST: public ParamsBase
{
int threshold = 10; // threshold on difference between intensity of the central pixel and pixels of a circle around this pixel.
bool nonmaxSuppression = true; // if true, non-maximum suppression is applied to detected corners (keypoints).
int neighbortype = cv::FastFeatureDetector::TYPE_9_16; // one of the three neighborhoods as defined in the paper: FastFeatureDetector::TYPE_9_16, FastFeatureDetector::TYPE_7_12, FastFeatureDetector::TYPE_5_8
int neighbortype = cv::FastFeatureDetector::TYPE_5_8; // one of the three neighborhoods as defined in the paper: FastFeatureDetector::TYPE_9_16, FastFeatureDetector::TYPE_7_12, FastFeatureDetector::TYPE_5_8
~DetectorParamsFAST(){}
};
......
......@@ -22,7 +22,7 @@ struct DetectorParamsORB: public ParamsBase
int firstLevel = 0; // It should be 0 in the current implementation.
int WTA_K = 2; // The number of points that produce each element of the oriented BRIEF descriptor. The default value 2 means the BRIEF where we take a random point pair and compare their brightnesses, so we get 0/1 response. Other possible values are 3 and 4. For example, 3 means that we take 3 random points (of course, those point coordinates are random, but they are generated from the pre-defined seed, so each element of BRIEF descriptor is computed deterministically from the pixel rectangle), find point of maximum brightness and output index of the winner (0, 1 or 2). Such output will occupy 2 bits, and therefore it will need a special variant of Hamming distance, denoted as NORM_HAMMING2 (2 bits per bin). When WTA_K=4, we take 4 random points to compute each bin (that will also occupy 2 bits with possible values 0, 1, 2 or 3).
int scoreType = cv::ORB::HARRIS_SCORE; // The default HARRIS_SCORE means that Harris algorithm is used to rank features (the score is written to KeyPoint::score and is used to retain best nfeatures features); FAST_SCORE is alternative value of the parameter that produces slightly less stable keypoints, but it is a little faster to compute.
int patchSize = 31; // size of the patch used by the oriented BRIEF descriptor. Of course, on smaller pyramid layers the perceived image area covered by a feature will be larger.
int patchSize = 16; // size of the patch used by the oriented BRIEF descriptor. Of course, on smaller pyramid layers the perceived image area covered by a feature will be larger.
~DetectorParamsORB(){}
};
......
......@@ -16,6 +16,8 @@ VU_PTR_TYPEDEFS(DetectorParamsSBD);
// Parameters for the Simple Blob Detector (SBD) wrapper.
// Thin holder around OpenCV's cv::SimpleBlobDetector::Params so it can be
// passed through the common ParamsBase interface used by the other detectors.
struct DetectorParamsSBD: public ParamsBase
{
// Native OpenCV blob-detector parameters; all fields keep OpenCV's defaults
// except filterByColor, which the constructor switches off below.
cv::SimpleBlobDetector::Params cv_params = cv::SimpleBlobDetector::Params();
// Disable color-based filtering by default — presumably so blobs are kept
// regardless of intensity polarity; TODO(review): confirm this is intended
// for all callers rather than a per-use-case setting.
DetectorParamsSBD(){cv_params.filterByColor = false;}
~DetectorParamsSBD(){}
};
......
......@@ -4,4 +4,4 @@ detector:
type: "FAST"
threshold: 10
nonmaxSuppression: true
neighbor type: 0 #enum { TYPE_9_16=0, TYPE_7_12=1, TYPE_5_8=2 };
neighbor type: 0 #enum { TYPE_5_8=0, TYPE_7_12=1, TYPE_9_16=2 };
......@@ -64,6 +64,10 @@ target_link_libraries(gtest_example ${PROJECT_NAME}) #
vu_add_gtest(gtest_detectors gtest_detectors.cpp)
target_link_libraries(gtest_detectors ${PROJECT_NAME})
# Matchers
vu_add_gtest(gtest_matchers gtest_matchers.cpp)
target_link_libraries(gtest_matchers ${PROJECT_NAME})
# Sensor usb_cam
vu_add_gtest(gtest_sensor_usb_cam gtest_sensor_usb_cam.cpp)
target_link_libraries(gtest_sensor_usb_cam ${PROJECT_NAME})
......
......@@ -4,4 +4,4 @@ detector:
type: "FAST"
threshold: 10
nonmaxSuppression: true
neighbor type: 0 #enum { TYPE_9_16=0, TYPE_7_12=1, TYPE_5_8=2 };
neighbor type: 0 #enum { TYPE_5_8=0, TYPE_7_12=1, TYPE_9_16=2 };
This diff is collapsed.
This diff is collapsed.
......@@ -48,78 +48,6 @@ TEST(MatcherBase, getParams)
ASSERT_EQ(params->match_type,1);
}
// End-to-end matcher test: detect + describe ORB features on two images,
// then exercise the MATCH and KNNMATCH matching modes and their distance
// filters, pinning the expected match counts for the bundled test images.
TEST(MatcherBase, match)
{
    cv::Mat image1 = cv::imread(filename1, CV_LOAD_IMAGE_GRAYSCALE);
    ASSERT_TRUE(image1.data) << "Failed to load image " << filename1 << std::endl;
    cv::Mat image2 = cv::imread(filename2, CV_LOAD_IMAGE_GRAYSCALE);
    // BUG FIX: originally re-checked image1.data, so a failed load of image2
    // went undetected and crashed later in detect().
    ASSERT_TRUE(image2.data) << "Failed to load image " << filename2 << std::endl;

    // Define detector (must be the ORB detector configured in the YAML file).
    std::string det_name = vision_utils::readYamlType(yaml_file_params, "detector");
    ASSERT_TRUE(det_name.compare("ORB")==0);
    vision_utils::DetectorBasePtr det_ptr = vision_utils::setupDetector(det_name, det_name + " detector", yaml_file_params);
    det_ptr = std::static_pointer_cast<vision_utils::DetectorORB>(det_ptr);
    ASSERT_TRUE(det_ptr != nullptr);

    // Define descriptor (ORB as well).
    std::string des_name = vision_utils::readYamlType(yaml_file_params, "descriptor");
    ASSERT_TRUE(des_name.compare("ORB")==0);
    vision_utils::DescriptorBasePtr des_ptr = vision_utils::setupDescriptor(des_name, des_name + " descriptor", yaml_file_params);
    des_ptr = std::static_pointer_cast<vision_utils::DescriptorORB>(des_ptr);
    ASSERT_TRUE(des_ptr != nullptr);

    // Image 1: one descriptor row per detected keypoint.
    KeyPointVector kpts1 = det_ptr->detect(image1);
    cv::Mat desc1 = des_ptr->getDescriptor(image1,kpts1);
    ASSERT_EQ(static_cast<int>(kpts1.size()), desc1.rows);

    // Image 2.
    KeyPointVector kpts2 = det_ptr->detect(image2);
    cv::Mat desc2 = des_ptr->getDescriptor(image2,kpts2);
    ASSERT_EQ(static_cast<int>(kpts2.size()), desc2.rows);

    // Define matcher.
    vision_utils::MatcherParamsBasePtr params_ptr = std::make_shared<vision_utils::MatcherParamsBase>();
    vision_utils::MatcherBasePtr mat_ptr;

    // MATCH type (one best match per descriptor).
    params_ptr->match_type = vision_utils::MATCH;
    mat_ptr = vision_utils::setupMatcher(matcher_name, matcher_name, params_ptr);
    params_ptr = mat_ptr->getParams();
    ASSERT_EQ(params_ptr->match_type,1);

    // match
    DMatchVector matches;
    mat_ptr->match(desc1,desc2,matches);
    ASSERT_EQ(matches.size(),80);

    // filter
    DMatchVector best_matches;
    KeyPointVector kpts_matched_img2;
    KeyPointVector kpts_matched_img1;
    mat_ptr->filterByDistance(10, 0.25, kpts1, kpts2, matches, image1.rows, image1.cols, best_matches, kpts_matched_img2, kpts_matched_img1);
    ASSERT_EQ(best_matches.size(),37);

    // KNNMATCH type (k nearest neighbours per descriptor).
    params_ptr->match_type = vision_utils::KNNMATCH;
    mat_ptr = vision_utils::setupMatcher(matcher_name, matcher_name, params_ptr);
    ASSERT_EQ(mat_ptr->getParams()->match_type,2);

    // match
    std::vector< DMatchVector > matches_vec;
    mat_ptr->match(desc1,desc2,matches_vec);
    // BUG FIX: originally asserted matches.size() (the MATCH-type result
    // from above), so the KNN match output was never actually checked.
    ASSERT_EQ(matches_vec.size(),80);

    // filter
    mat_ptr->filterByDistance(10, 0.25, kpts1, kpts2, matches_vec, image1.rows, image1.cols, best_matches, kpts_matched_img2, kpts_matched_img1);
    ASSERT_EQ(best_matches.size(),85);

    // RADIUSMATCH type currently not implemented:
    // OpenCV Error: The function/feature is not implemented (LSH index does not support radiusSearch operation) in radiusSearch
}
int main(int argc, char **argv)
{
testing::InitGoogleTest(&argc, argv);
......
#include "utils_gtest.h"
#include "../vision_utils.h"
#include "../detectors.h"
#include "../descriptors.h"
#include "../matchers.h"
// Matcher implementation exercised by all tests below.
std::string matcher_name = "BRUTEFORCE";
// Project root injected at build time (compile definition _VU_ROOT_DIR),
// used to locate the bundled test fixtures.
std::string vu_root = _VU_ROOT_DIR;
// YAML file configuring the ORB detector/descriptor used by the tests.
std::string yaml_file_params = vu_root + "/src/test/data/base_classes.yaml";
// Two test images of the same scene; expected match counts in the tests
// below are pinned to this exact pair.
std::string filename1 = vu_root + "/src/test/data/img_1.png";
std::string filename2 = vu_root + "/src/test/data/img_2.png";
// Sanity check: both fixture images must be present and readable, otherwise
// every subsequent matcher test would fail for an unrelated reason.
TEST(Matchers, LoadImagesFromFile)
{
    cv::Mat image1 = cv::imread(filename1, CV_LOAD_IMAGE_GRAYSCALE);
    ASSERT_TRUE(image1.data) << "Failed to load image " << filename1 << std::endl;
    cv::Mat image2 = cv::imread(filename2, CV_LOAD_IMAGE_GRAYSCALE);
    // BUG FIX: originally asserted image1.data again, so a missing/unreadable
    // image2 was never caught by this test.
    ASSERT_TRUE(image2.data) << "Failed to load image " << filename2 << std::endl;
}
// MATCH mode: one best match per descriptor, then distance filtering.
// Expected counts (80 raw matches, 37 after filtering) are pinned to the
// bundled img_1.png/img_2.png pair and the ORB settings in the YAML file.
TEST(Matchers, MATCH)
{
    cv::Mat image1 = cv::imread(filename1, CV_LOAD_IMAGE_GRAYSCALE);
    ASSERT_TRUE(image1.data) << "Failed to load image " << filename1 << std::endl;
    cv::Mat image2 = cv::imread(filename2, CV_LOAD_IMAGE_GRAYSCALE);
    // BUG FIX: originally re-checked image1.data instead of image2.data.
    ASSERT_TRUE(image2.data) << "Failed to load image " << filename2 << std::endl;

    // Define detector (ORB, per YAML config).
    std::string det_name = vision_utils::readYamlType(yaml_file_params, "detector");
    ASSERT_TRUE(det_name.compare("ORB")==0);
    vision_utils::DetectorBasePtr det_ptr = vision_utils::setupDetector(det_name, det_name + " detector", yaml_file_params);
    det_ptr = std::static_pointer_cast<vision_utils::DetectorORB>(det_ptr);
    ASSERT_TRUE(det_ptr != nullptr);

    // Define descriptor (ORB).
    std::string des_name = vision_utils::readYamlType(yaml_file_params, "descriptor");
    ASSERT_TRUE(des_name.compare("ORB")==0);
    vision_utils::DescriptorBasePtr des_ptr = vision_utils::setupDescriptor(des_name, des_name + " descriptor", yaml_file_params);
    des_ptr = std::static_pointer_cast<vision_utils::DescriptorORB>(des_ptr);
    ASSERT_TRUE(des_ptr != nullptr);

    // Image 1: one descriptor row per detected keypoint.
    KeyPointVector kpts1 = det_ptr->detect(image1);
    cv::Mat desc1 = des_ptr->getDescriptor(image1,kpts1);
    ASSERT_EQ(static_cast<int>(kpts1.size()), desc1.rows);

    // Image 2.
    KeyPointVector kpts2 = det_ptr->detect(image2);
    cv::Mat desc2 = des_ptr->getDescriptor(image2,kpts2);
    ASSERT_EQ(static_cast<int>(kpts2.size()), desc2.rows);

    // Define matcher.
    vision_utils::MatcherParamsBasePtr params_ptr = std::make_shared<vision_utils::MatcherParamsBase>();
    vision_utils::MatcherBasePtr mat_ptr;

    // MATCH type
    params_ptr->match_type = vision_utils::MATCH;
    mat_ptr = vision_utils::setupMatcher(matcher_name, matcher_name, params_ptr);
    params_ptr = mat_ptr->getParams();
    ASSERT_EQ(params_ptr->match_type,1);

    // match
    DMatchVector matches;
    mat_ptr->match(desc1,desc2,matches);
    ASSERT_EQ(matches.size(),80);

    // filter
    DMatchVector best_matches;
    KeyPointVector kpts_matched_img2;
    KeyPointVector kpts_matched_img1;
    mat_ptr->filterByDistance(10, 0.25, kpts1, kpts2, matches, image1.rows, image1.cols, best_matches, kpts_matched_img2, kpts_matched_img1);
    ASSERT_EQ(best_matches.size(),37);
}
// KNNMATCH mode: k nearest neighbours per descriptor, then distance
// filtering. Expected counts (80 raw, 48 filtered) are pinned to the
// bundled test image pair and the ORB settings in the YAML file.
TEST(Matchers, KNNMATCH)
{
    cv::Mat image1 = cv::imread(filename1, CV_LOAD_IMAGE_GRAYSCALE);
    ASSERT_TRUE(image1.data) << "Failed to load image " << filename1 << std::endl;
    cv::Mat image2 = cv::imread(filename2, CV_LOAD_IMAGE_GRAYSCALE);
    // BUG FIX: originally re-checked image1.data instead of image2.data.
    ASSERT_TRUE(image2.data) << "Failed to load image " << filename2 << std::endl;

    // Define detector (ORB, per YAML config).
    std::string det_name = vision_utils::readYamlType(yaml_file_params, "detector");
    ASSERT_TRUE(det_name.compare("ORB")==0);
    vision_utils::DetectorBasePtr det_ptr = vision_utils::setupDetector(det_name, det_name + " detector", yaml_file_params);
    det_ptr = std::static_pointer_cast<vision_utils::DetectorORB>(det_ptr);
    ASSERT_TRUE(det_ptr != nullptr);

    // Define descriptor (ORB).
    std::string des_name = vision_utils::readYamlType(yaml_file_params, "descriptor");
    ASSERT_TRUE(des_name.compare("ORB")==0);
    vision_utils::DescriptorBasePtr des_ptr = vision_utils::setupDescriptor(des_name, des_name + " descriptor", yaml_file_params);
    des_ptr = std::static_pointer_cast<vision_utils::DescriptorORB>(des_ptr);
    ASSERT_TRUE(des_ptr != nullptr);

    // Image 1: one descriptor row per detected keypoint.
    KeyPointVector kpts1 = det_ptr->detect(image1);
    cv::Mat desc1 = des_ptr->getDescriptor(image1,kpts1);
    ASSERT_EQ(static_cast<int>(kpts1.size()), desc1.rows);

    // Image 2.
    KeyPointVector kpts2 = det_ptr->detect(image2);
    cv::Mat desc2 = des_ptr->getDescriptor(image2,kpts2);
    ASSERT_EQ(static_cast<int>(kpts2.size()), desc2.rows);

    // Define matcher.
    vision_utils::MatcherParamsBasePtr params_ptr = std::make_shared<vision_utils::MatcherParamsBase>();
    vision_utils::MatcherBasePtr mat_ptr;

    // KNNMATCH type
    params_ptr->match_type = vision_utils::KNNMATCH;
    mat_ptr = vision_utils::setupMatcher(matcher_name, matcher_name, params_ptr);
    ASSERT_EQ(mat_ptr->getParams()->match_type,2);

    // match
    std::vector< DMatchVector > matches_vec;
    mat_ptr->match(desc1,desc2,matches_vec);
    ASSERT_EQ(matches_vec.size(),80);

    // filter
    DMatchVector best_matches;
    KeyPointVector kpts_matched_img2;
    KeyPointVector kpts_matched_img1;
    mat_ptr->filterByDistance(10, 0.25, kpts1, kpts2, matches_vec, image1.rows, image1.cols, best_matches, kpts_matched_img2, kpts_matched_img1);
    ASSERT_EQ(best_matches.size(),48);
}
// RADIUSMATCH mode: only verifies that the matcher can be constructed with
// this type; the actual radius match is disabled because OpenCV's LSH index
// does not implement radiusSearch (see comment below).
TEST(Matchers, RADIUSMATCH)
{
    cv::Mat image1 = cv::imread(filename1, CV_LOAD_IMAGE_GRAYSCALE);
    ASSERT_TRUE(image1.data) << "Failed to load image " << filename1 << std::endl;
    cv::Mat image2 = cv::imread(filename2, CV_LOAD_IMAGE_GRAYSCALE);
    // BUG FIX: originally re-checked image1.data instead of image2.data.
    ASSERT_TRUE(image2.data) << "Failed to load image " << filename2 << std::endl;

    // Define detector (ORB, per YAML config).
    std::string det_name = vision_utils::readYamlType(yaml_file_params, "detector");
    ASSERT_TRUE(det_name.compare("ORB")==0);
    vision_utils::DetectorBasePtr det_ptr = vision_utils::setupDetector(det_name, det_name + " detector", yaml_file_params);
    det_ptr = std::static_pointer_cast<vision_utils::DetectorORB>(det_ptr);
    ASSERT_TRUE(det_ptr != nullptr);

    // Define descriptor (ORB).
    std::string des_name = vision_utils::readYamlType(yaml_file_params, "descriptor");
    ASSERT_TRUE(des_name.compare("ORB")==0);
    vision_utils::DescriptorBasePtr des_ptr = vision_utils::setupDescriptor(des_name, des_name + " descriptor", yaml_file_params);
    des_ptr = std::static_pointer_cast<vision_utils::DescriptorORB>(des_ptr);
    ASSERT_TRUE(des_ptr != nullptr);

    // Image 1: one descriptor row per detected keypoint.
    KeyPointVector kpts1 = det_ptr->detect(image1);
    cv::Mat desc1 = des_ptr->getDescriptor(image1,kpts1);
    ASSERT_EQ(static_cast<int>(kpts1.size()), desc1.rows);

    // Image 2.
    KeyPointVector kpts2 = det_ptr->detect(image2);
    cv::Mat desc2 = des_ptr->getDescriptor(image2,kpts2);
    ASSERT_EQ(static_cast<int>(kpts2.size()), desc2.rows);

    // Define matcher.
    vision_utils::MatcherParamsBasePtr params_ptr = std::make_shared<vision_utils::MatcherParamsBase>();
    vision_utils::MatcherBasePtr mat_ptr;

    // RADIUSMATCH type
    params_ptr->match_type = vision_utils::RADIUSMATCH;
    mat_ptr = vision_utils::setupMatcher(matcher_name, matcher_name, params_ptr);
    ASSERT_EQ(mat_ptr->getParams()->match_type,3);

    // RADIUSMATCH type currently not implemented:
    // OpenCV Error: The function/feature is not implemented (LSH index does not support radiusSearch operation) in radiusSearch
    // match
    // std::vector< DMatchVector > matches_vec;
    // mat_ptr->match(desc1,desc2,matches_vec);
    // ASSERT_EQ(matches_vec.size(),80);
    //
    // // filter
    // DMatchVector best_matches;
    // KeyPointVector kpts_matched_img2;
    // KeyPointVector kpts_matched_img1;
    // mat_ptr->filterByDistance(10, 0.25, kpts1, kpts2, matches_vec, image1.rows, image1.cols, best_matches, kpts_matched_img2, kpts_matched_img1);
    // ASSERT_EQ(best_matches.size(),48);
}
// Standard GoogleTest entry point: parse gtest flags, then run every
// TEST defined in this translation unit and return the overall status.
int main(int argc, char **argv)
{
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment