Skip to content
Snippets Groups Projects
Commit eb16f9cf authored by Angel Santamaria-Navarro's avatar Angel Santamaria-Navarro
Browse files

add fundamental matrix example

parent 4db451e8
No related branches found
No related tags found
No related merge requests found
...@@ -299,7 +299,7 @@ if(BUILD_TESTS) ...@@ -299,7 +299,7 @@ if(BUILD_TESTS)
endif() endif()
## Examples ## ## Examples ##
IF(BUILD_EXAMPLES) IF(BUILD_EXAMPLES AND YAMLCPP_FOUND)
MESSAGE("Building examples.") MESSAGE("Building examples.")
ADD_SUBDIRECTORY(examples) ADD_SUBDIRECTORY(examples)
ENDIF(BUILD_EXAMPLES) ENDIF(BUILD_EXAMPLES AND YAMLCPP_FOUND)
...@@ -127,7 +127,7 @@ void AlgorithmTRACKFEATURES::trackFrame(const FramePtr& _frame_old, FramePtr& _t ...@@ -127,7 +127,7 @@ void AlgorithmTRACKFEATURES::trackFrame(const FramePtr& _frame_old, FramePtr& _t
cv::line(image_draw_, tracked_kp.pt, target_kps[ii].pt, cv::Scalar(0, 0, 255), 3); cv::line(image_draw_, tracked_kp.pt, target_kps[ii].pt, cv::Scalar(0, 0, 255), 3);
cv::circle(image_draw_, tracked_kp.pt, 5, cv::Scalar(255.0, 255.0, 0.0), -1, 8, 0); cv::circle(image_draw_, tracked_kp.pt, 5, cv::Scalar(255.0, 255.0, 0.0), -1, 8, 0);
cv::putText(image_draw_, std::to_string(ii), tracked_kp.pt, cv:: FONT_HERSHEY_SIMPLEX, 0.4, cv::Scalar(0, 0, 0)); cv::putText(image_draw_, std::to_string(ii), tracked_kp.pt, cv:: FONT_HERSHEY_SIMPLEX, 0.4, cv::Scalar(0, 0, 0));
} }
} }
} }
} }
......
...@@ -15,7 +15,7 @@ VU_PTR_TYPEDEFS(DetectorParamsORB); ...@@ -15,7 +15,7 @@ VU_PTR_TYPEDEFS(DetectorParamsORB);
*/ */
struct DetectorParamsORB: public ParamsBase struct DetectorParamsORB: public ParamsBase
{ {
int nfeatures = 20; // The maximum number of features to retain. int nfeatures = 200; // The maximum number of features to retain.
float scaleFactor = 1.2f; // Pyramid decimation ratio, greater than 1. scaleFactor==2 means the classical pyramid, where each next level has 4x less pixels than the previous, but such a big scale factor will degrade feature matching scores dramatically. On the other hand, too close to 1 scale factor will mean that to cover certain scale range you will need more pyramid levels and so the speed will suffer. float scaleFactor = 1.2f; // Pyramid decimation ratio, greater than 1. scaleFactor==2 means the classical pyramid, where each next level has 4x less pixels than the previous, but such a big scale factor will degrade feature matching scores dramatically. On the other hand, too close to 1 scale factor will mean that to cover certain scale range you will need more pyramid levels and so the speed will suffer.
int nlevels = 8; // The number of pyramid levels. The smallest level will have linear size equal to input_image_linear_size/pow(scaleFactor, nlevels). int nlevels = 8; // The number of pyramid levels. The smallest level will have linear size equal to input_image_linear_size/pow(scaleFactor, nlevels).
int edgeThreshold = 16; // This is size of the border where the features are not detected. It should roughly match the patchSize parameter. int edgeThreshold = 16; // This is size of the border where the features are not detected. It should roughly match the patchSize parameter.
......
# create an example application # Factories
ADD_EXECUTABLE(test_factories test_factories.cpp ) ADD_EXECUTABLE(test_factories test_factories.cpp )
TARGET_LINK_LIBRARIES(test_factories ${PROJECT_NAME})
# Sensor
ADD_EXECUTABLE(test_sensor test_sensor.cpp) ADD_EXECUTABLE(test_sensor test_sensor.cpp)
TARGET_LINK_LIBRARIES(test_sensor vision_utils)
# Detector
ADD_EXECUTABLE(test_detector test_detector.cpp) ADD_EXECUTABLE(test_detector test_detector.cpp)
TARGET_LINK_LIBRARIES(test_detector vision_utils)
# Descriptor
ADD_EXECUTABLE(test_descriptor test_descriptor.cpp) ADD_EXECUTABLE(test_descriptor test_descriptor.cpp)
TARGET_LINK_LIBRARIES(test_descriptor vision_utils)
# Matcher
ADD_EXECUTABLE(test_matcher test_matcher.cpp) ADD_EXECUTABLE(test_matcher test_matcher.cpp)
TARGET_LINK_LIBRARIES(test_matcher vision_utils)
# Alg: optical flow
ADD_EXECUTABLE(test_algorithm_opticalflowpyrlk test_algorithm_opticalflowpyrlk.cpp) ADD_EXECUTABLE(test_algorithm_opticalflowpyrlk test_algorithm_opticalflowpyrlk.cpp)
TARGET_LINK_LIBRARIES(test_algorithm_opticalflowpyrlk vision_utils)
# Alg: track features
ADD_EXECUTABLE(test_algorithm_trackfeatures test_algorithm_trackfeatures.cpp) ADD_EXECUTABLE(test_algorithm_trackfeatures test_algorithm_trackfeatures.cpp)
TARGET_LINK_LIBRARIES(test_algorithm_trackfeatures vision_utils)
# Alg: active search
ADD_EXECUTABLE(test_algorithm_activesearch test_algorithm_activesearch.cpp) ADD_EXECUTABLE(test_algorithm_activesearch test_algorithm_activesearch.cpp)
TARGET_LINK_LIBRARIES(test_algorithm_activesearch vision_utils)
# link necessary libraries # eg: Fundamental matrix
TARGET_LINK_LIBRARIES(test_factories ${PROJECT_NAME}) ADD_EXECUTABLE(fundamental_matrix fundamental_matrix.cpp)
TARGET_LINK_LIBRARIES(test_sensor vision_utils) TARGET_LINK_LIBRARIES(fundamental_matrix vision_utils)
TARGET_LINK_LIBRARIES(test_detector vision_utils) \ No newline at end of file
TARGET_LINK_LIBRARIES(test_descriptor vision_utils)
TARGET_LINK_LIBRARIES(test_matcher vision_utils)
TARGET_LINK_LIBRARIES(test_algorithm_opticalflowpyrlk vision_utils)
TARGET_LINK_LIBRARIES(test_algorithm_trackfeatures vision_utils)
TARGET_LINK_LIBRARIES(test_algorithm_activesearch vision_utils)
\ No newline at end of file
/*
* test_opencv.cpp
*
* Created on: Apr 24, 2016
* Author: jsola
*/
// Testing things for the 3D image odometry
// general includes
#include "unistd.h"
#include <time.h>
// Vision utils
#include "../vision_utils.h"
#include "../detectors.h"
#include "../descriptors.h"
#include "../matchers.h"
#include "../common_class/buffer.h"
// General includes
#include <cmath>
#include <complex> // std::complex, std::norm
//std includes
#include <iostream>
/**
 * @brief Demo: ORB detect/describe + brute-force Hamming matching between
 *        consecutive frames, then fundamental-matrix RANSAC to keep inliers.
 *
 * Usage:  ./test_opencv <path/videoname.mp4>   (video file input)
 *         ./test_opencv 0                      (camera input)
 *
 * @return 0 on normal termination, non-zero if the input stream cannot be opened.
 */
int main(int argc, char** argv)
{
    std::cout << std::endl << " ========= OpenCv test ===========" << std::endl << std::endl;

    // Parse input parameters: either a video file path or "0" for the camera.
    const char * filename;
    cv::VideoCapture capture;
    if (argc < 2)
    {
        std::cout << "Please use\n\t./test_opencv <arg> "
                     "\nwith "
                     "\n\targ = <path/videoname.mp4> for video input "
                     "\n\targ = 0 for camera input." << std::endl;
        return 0;
    }
    else if (std::string(argv[1]) == "0")
    {
        filename = "0"; // camera
        capture.open(0);
        std::cout << "Input stream from camera " << std::endl;
    }
    else
    {
        filename = argv[1]; // provided through argument
        capture.open(filename);
        std::cout << "Input video file: " << filename << std::endl;
    }

    // Open input stream. Bail out early: every later capture/detect call
    // would fail on an unopened stream.
    if (!capture.isOpened()) // check if we succeeded
    {
        std::cout << "failed" << std::endl;
        return -1;
    }
    else
        std::cout << "succeeded" << std::endl;

    // Set image detector (ORB).
    vision_utils::DetectorParamsORBPtr det_params = std::make_shared<vision_utils::DetectorParamsORB>();
    vision_utils::DetectorBasePtr det_b_ptr = vision_utils::setupDetector("ORB", "ORB detector", det_params);
    vision_utils::DetectorORBPtr det_ptr = std::static_pointer_cast<vision_utils::DetectorORB>(det_b_ptr);

    // Set image descriptor (ORB).
    vision_utils::DescriptorParamsORBPtr desc_params = std::make_shared<vision_utils::DescriptorParamsORB>();
    vision_utils::DescriptorBasePtr des_b_ptr = vision_utils::setupDescriptor("ORB", "ORB detector", desc_params);
    vision_utils::DescriptorORBPtr des_ptr = std::static_pointer_cast<vision_utils::DescriptorORB>(des_b_ptr);

    // Set matcher (brute force with Hamming distance, suited to binary ORB descriptors).
    vision_utils::MatcherParamsBasePtr mat_params = std::make_shared<vision_utils::MatcherParamsBase>();
    mat_params->match_type = vision_utils::MATCH;
    std::string matcher_name = "BRUTEFORCE_HAMMING_2";
    vision_utils::MatcherBasePtr mat_b_ptr = vision_utils::setupMatcher(matcher_name, matcher_name, mat_params);
    vision_utils::MatcherBRUTEFORCE_HAMMING_2Ptr mat_ptr = std::static_pointer_cast<vision_utils::MatcherBRUTEFORCE_HAMMING_2>(mat_b_ptr);

    // Set and print image properties.
    unsigned int img_width  = (unsigned int)capture.get(CV_CAP_PROP_FRAME_WIDTH);
    unsigned int img_height = (unsigned int)capture.get(CV_CAP_PROP_FRAME_HEIGHT);
    std::cout << "Image size: " << img_width << "x" << img_height << std::endl;

    // Set the image buffer (keeps the last frames so we can draw previous vs current).
    unsigned int buffer_size = 5;
    vision_utils::Buffer<cv::Mat> image_buffer(buffer_size);
    cv::Mat img;
    capture >> img;
    if (img.empty())
    {
        std::cout << "Empty first frame, exiting." << std::endl;
        return -1;
    }
    image_buffer.add(img);

    // User interface.
    cv::namedWindow("Feature tracker"); // Creates a window for display.
    cv::moveWindow("Feature tracker", 0, 0);

    // First image: detect and describe once; subsequent frames reuse these as "previous".
    KeyPointVector keypoints_1 = det_ptr->detect(image_buffer.back());
    cv::Mat descriptors_1 = des_ptr->getDescriptor(image_buffer.back(), keypoints_1);

    while (1)
    {
        capture >> img;
        if (img.empty()) // end of video / camera disconnected
            break;

        // Declare all per-frame variables.
        DMatchVector matches;
        cv::Mat inliers_bool;
        cv::Mat img_matches;
        cv::Mat img_scaled;
        PointVector matched_1, matched_2;
        DMatchVector good_matches, inlier_matches;
        PointVector inliers_1, inliers_2;

        // Detect and describe in the new image.
        KeyPointVector keypoints_2 = det_ptr->detect(img);
        if (keypoints_2.size() > 0)
        {
            image_buffer.add(img);
            // BUGFIX: describe the NEW keypoints (was keypoints_1, which sampled
            // descriptors of the new frame at the old frame's locations).
            cv::Mat descriptors_2 = des_ptr->getDescriptor(image_buffer.back(), keypoints_2);

            unsigned int max_dist = 0;
            unsigned int min_dist = 512; // max possible Hamming distance for 512-bit descriptors
            if (keypoints_1.size() * keypoints_2.size() != 0)
            {
                // Match descriptor vectors (query = previous frame, train = current frame).
                mat_ptr->match(descriptors_1, descriptors_2, matches);
                // Quick calculation of max and min distances between matched keypoints.
                // BUGFIX: iterate over matches.size(), not descriptors_1.rows — the
                // matcher may return fewer matches than query descriptors.
                for (unsigned int ii = 0; ii < matches.size(); ii++)
                {
                    unsigned int dist = matches[ii].distance;
                    if (dist < min_dist)
                        min_dist = dist;
                    if (dist > max_dist)
                        max_dist = dist;
                }
            }
            else
            {
                min_dist = 999;
                max_dist = 999;
            }
            printf("-- Max dist : %d \n", max_dist);
            printf("-- Min dist : %d \n", min_dist);

            //-- Select only "good" matches (i.e. those at a distance which is small)
            for (unsigned int ii = 0; ii < matches.size(); ii++)
            {
                if (matches[ii].distance <= 50) //std::max(2*min_dist, 0.02) )
                {
                    good_matches.push_back(matches[ii]);
                    // BUGFIX: queryIdx indexes keypoints_1 (previous frame) and
                    // trainIdx indexes keypoints_2 (current frame); the original
                    // read both sides from keypoints_1 with swapped indices.
                    matched_1.push_back(keypoints_1[matches[ii].queryIdx].pt);
                    matched_2.push_back(keypoints_2[matches[ii].trainIdx].pt);
                }
            }

            // Compute the fundamental matrix F and discard outliers through RANSAC in one go.
            if (good_matches.size() > 20) // Minimum is 8 points, but we need more for robustness
            {
                // Find fundamental matrix using RANSAC; returns the set of inliers as a CV_8U mask.
                cv::Mat F = findFundamentalMat(matched_1, matched_2, cv::FM_RANSAC, 3.0, 0.98, inliers_bool);
                std::cout << "F = " << F << std::endl;
                // Recover the inlier matches (mask elements are unsigned 8-bit).
                for (unsigned int ii = 0; ii < good_matches.size(); ii++)
                    if (inliers_bool.at<uchar>(ii) == 1)
                        inlier_matches.push_back(good_matches[ii]);
            }
            std::cout << "Matches: initial " << matches.size() << " | good " << good_matches.size() << " | inliers "
                      << inlier_matches.size() << std::endl;
            std::cout << "+++++++++++++++" << std::endl;
            std::cout << keypoints_1.size() << " " << keypoints_2.size() << " " << inlier_matches.size() << " " << img_matches.size() << std::endl;

            // Draw RANSAC inliers (previous frame on the left, current on the right).
            cv::drawMatches(image_buffer[image_buffer.size()-2], keypoints_1, image_buffer.back(), keypoints_2, inlier_matches, img_matches,
                            cv::Scalar::all(-1), cv::Scalar::all(-1), std::vector<char>(), 0); //cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
            // Show detected matches; ESC exits the loop.
            imshow("Feature tracker", img_matches);
            if ((cv::waitKey(1) & 0xFF) == 27)
                break;

            // Current frame becomes the previous frame for the next iteration.
            keypoints_1 = keypoints_2;
            descriptors_1 = descriptors_2;
        }
    }
    return 0;
}
...@@ -3,25 +3,25 @@ sensor: ...@@ -3,25 +3,25 @@ sensor:
detector: detector:
type: "ORB" type: "ORB"
nfeatures: 100 nfeatures: 200
scale factor: 1.2 scale factor: 1.2
nlevels: 8 nlevels: 8
edge threshold: 8 # 16 edge threshold: 8 # 16
first level: 0 first level: 0
WTA_K: 2 # See: http://docs.opencv.org/trunk/db/d95/classcv_1_1ORB.html#a180ae17d3300cf2c619aa240d9b607e5 WTA_K: 2 # See: http://docs.opencv.org/trunk/db/d95/classcv_1_1ORB.html#a180ae17d3300cf2c619aa240d9b607e5
score type: 1 #enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 }; score type: 0 #enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 };
patch size: 15 # 31 patch size: 16 # 31
descriptor: descriptor:
type: "ORB" type: "ORB"
nfeatures: 100 nfeatures: 200
scale factor: 1.2 scale factor: 1.2
nlevels: 8 nlevels: 8
edge threshold: 8 # 16 edge threshold: 8 # 16
first level: 0 first level: 0
WTA_K: 2 # See: http://docs.opencv.org/trunk/db/d95/classcv_1_1ORB.html#a180ae17d3300cf2c619aa240d9b607e5 WTA_K: 2 # See: http://docs.opencv.org/trunk/db/d95/classcv_1_1ORB.html#a180ae17d3300cf2c619aa240d9b607e5
score type: 1 #enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 }; score type: 0 #enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 };
patch size: 15 # 31 patch size: 16 # 31
matcher: matcher:
type: "FLANNBASED" type: "FLANNBASED"
...@@ -35,6 +35,6 @@ algorithm: ...@@ -35,6 +35,6 @@ algorithm:
margin: 0 margin: 0
separation: 5 separation: 5
min features to track: 20 min features to track: 20
max new features: 100 max new features: 200
min response new features: 80 min response new features: 80
matcher min normalized score: 0.85 matcher min normalized score: 0.85
\ No newline at end of file
...@@ -9,8 +9,8 @@ detector: ...@@ -9,8 +9,8 @@ detector:
edge threshold: 8 # 16 edge threshold: 8 # 16
first level: 0 first level: 0
WTA_K: 2 # See: http://docs.opencv.org/trunk/db/d95/classcv_1_1ORB.html#a180ae17d3300cf2c619aa240d9b607e5 WTA_K: 2 # See: http://docs.opencv.org/trunk/db/d95/classcv_1_1ORB.html#a180ae17d3300cf2c619aa240d9b607e5
score type: 1 #enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 }; score type: 0 #enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 };
patch size: 15 # 31 patch size: 16 # 31
algorithm: algorithm:
type: "OPTFLOWPYRLK" type: "OPTFLOWPYRLK"
......
...@@ -9,8 +9,8 @@ detector: ...@@ -9,8 +9,8 @@ detector:
edge threshold: 8 # 16 edge threshold: 8 # 16
first level: 0 first level: 0
WTA_K: 2 # See: http://docs.opencv.org/trunk/db/d95/classcv_1_1ORB.html#a180ae17d3300cf2c619aa240d9b607e5 WTA_K: 2 # See: http://docs.opencv.org/trunk/db/d95/classcv_1_1ORB.html#a180ae17d3300cf2c619aa240d9b607e5
score type: 1 #enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 }; score type: 0 #enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 };
patch size: 15 # 31 patch size: 16 # 31
descriptor: descriptor:
type: "ORB" type: "ORB"
...@@ -20,7 +20,7 @@ descriptor: ...@@ -20,7 +20,7 @@ descriptor:
edge threshold: 8 # 16 edge threshold: 8 # 16
first level: 0 first level: 0
WTA_K: 2 # See: http://docs.opencv.org/trunk/db/d95/classcv_1_1ORB.html#a180ae17d3300cf2c619aa240d9b607e5 WTA_K: 2 # See: http://docs.opencv.org/trunk/db/d95/classcv_1_1ORB.html#a180ae17d3300cf2c619aa240d9b607e5
score type: 1 #enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 }; score type: 0 #enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 };
patch size: 15 # 31 patch size: 16 # 31
...@@ -58,7 +58,7 @@ TEST(Descriptors, BRIEF) ...@@ -58,7 +58,7 @@ TEST(Descriptors, BRIEF)
vision_utils::DetectorBasePtr det_b_ptr = vision_utils::setupDetector(det_name, det_name + " detector", params); vision_utils::DetectorBasePtr det_b_ptr = vision_utils::setupDetector(det_name, det_name + " detector", params);
vision_utils::DetectorAKAZEPtr det_ptr = std::static_pointer_cast<vision_utils::DetectorAKAZE>(det_b_ptr); vision_utils::DetectorAKAZEPtr det_ptr = std::static_pointer_cast<vision_utils::DetectorAKAZE>(det_b_ptr);
std::vector<cv::KeyPoint> keypoints = det_ptr->detect(image); std::vector<cv::KeyPoint> keypoints = det_ptr->detect(image);
ASSERT_EQ(keypoints.size(), 17); ASSERT_EQ(keypoints.size(), 135);
vision_utils::DescriptorParamsBRIEFPtr des_params = std::make_shared<vision_utils::DescriptorParamsBRIEF>(); vision_utils::DescriptorParamsBRIEFPtr des_params = std::make_shared<vision_utils::DescriptorParamsBRIEF>();
std::string des_name = "BRIEF"; std::string des_name = "BRIEF";
...@@ -108,7 +108,7 @@ TEST(Descriptors, DAISY) ...@@ -108,7 +108,7 @@ TEST(Descriptors, DAISY)
vision_utils::DetectorBasePtr det_b_ptr = vision_utils::setupDetector(det_name, det_name + " detector", params); vision_utils::DetectorBasePtr det_b_ptr = vision_utils::setupDetector(det_name, det_name + " detector", params);
vision_utils::DetectorAKAZEPtr det_ptr = std::static_pointer_cast<vision_utils::DetectorAKAZE>(det_b_ptr); vision_utils::DetectorAKAZEPtr det_ptr = std::static_pointer_cast<vision_utils::DetectorAKAZE>(det_b_ptr);
std::vector<cv::KeyPoint> keypoints = det_ptr->detect(image); std::vector<cv::KeyPoint> keypoints = det_ptr->detect(image);
ASSERT_EQ(keypoints.size(), 17); ASSERT_EQ(keypoints.size(), 135);
vision_utils::DescriptorParamsDAISYPtr des_params = std::make_shared<vision_utils::DescriptorParamsDAISY>(); vision_utils::DescriptorParamsDAISYPtr des_params = std::make_shared<vision_utils::DescriptorParamsDAISY>();
std::string des_name = "DAISY"; std::string des_name = "DAISY";
...@@ -133,7 +133,7 @@ TEST(Descriptors, FREAK) ...@@ -133,7 +133,7 @@ TEST(Descriptors, FREAK)
vision_utils::DetectorBasePtr det_b_ptr = vision_utils::setupDetector(det_name, det_name + " detector", params); vision_utils::DetectorBasePtr det_b_ptr = vision_utils::setupDetector(det_name, det_name + " detector", params);
vision_utils::DetectorAKAZEPtr det_ptr = std::static_pointer_cast<vision_utils::DetectorAKAZE>(det_b_ptr); vision_utils::DetectorAKAZEPtr det_ptr = std::static_pointer_cast<vision_utils::DetectorAKAZE>(det_b_ptr);
std::vector<cv::KeyPoint> keypoints = det_ptr->detect(image); std::vector<cv::KeyPoint> keypoints = det_ptr->detect(image);
ASSERT_EQ(keypoints.size(), 17); ASSERT_EQ(keypoints.size(), 135);
vision_utils::DescriptorParamsFREAKPtr des_params = std::make_shared<vision_utils::DescriptorParamsFREAK>(); vision_utils::DescriptorParamsFREAKPtr des_params = std::make_shared<vision_utils::DescriptorParamsFREAK>();
std::string des_name = "FREAK"; std::string des_name = "FREAK";
...@@ -183,7 +183,7 @@ TEST(Descriptors, LATCH) ...@@ -183,7 +183,7 @@ TEST(Descriptors, LATCH)
vision_utils::DetectorBasePtr det_b_ptr = vision_utils::setupDetector(det_name, det_name + " detector", params); vision_utils::DetectorBasePtr det_b_ptr = vision_utils::setupDetector(det_name, det_name + " detector", params);
vision_utils::DetectorAKAZEPtr det_ptr = std::static_pointer_cast<vision_utils::DetectorAKAZE>(det_b_ptr); vision_utils::DetectorAKAZEPtr det_ptr = std::static_pointer_cast<vision_utils::DetectorAKAZE>(det_b_ptr);
std::vector<cv::KeyPoint> keypoints = det_ptr->detect(image); std::vector<cv::KeyPoint> keypoints = det_ptr->detect(image);
ASSERT_EQ(keypoints.size(), 17); ASSERT_EQ(keypoints.size(), 135);
vision_utils::DescriptorParamsLATCHPtr des_params = std::make_shared<vision_utils::DescriptorParamsLATCH>(); vision_utils::DescriptorParamsLATCHPtr des_params = std::make_shared<vision_utils::DescriptorParamsLATCH>();
std::string des_name = "LATCH"; std::string des_name = "LATCH";
...@@ -208,7 +208,7 @@ TEST(Descriptors, LUCID) ...@@ -208,7 +208,7 @@ TEST(Descriptors, LUCID)
vision_utils::DetectorBasePtr det_b_ptr = vision_utils::setupDetector(det_name, det_name + " detector", params); vision_utils::DetectorBasePtr det_b_ptr = vision_utils::setupDetector(det_name, det_name + " detector", params);
vision_utils::DetectorAKAZEPtr det_ptr = std::static_pointer_cast<vision_utils::DetectorAKAZE>(det_b_ptr); vision_utils::DetectorAKAZEPtr det_ptr = std::static_pointer_cast<vision_utils::DetectorAKAZE>(det_b_ptr);
std::vector<cv::KeyPoint> keypoints = det_ptr->detect(image); std::vector<cv::KeyPoint> keypoints = det_ptr->detect(image);
ASSERT_EQ(keypoints.size(), 20); ASSERT_EQ(keypoints.size(), 200);
vision_utils::DescriptorParamsLUCIDPtr des_params = std::make_shared<vision_utils::DescriptorParamsLUCID>(); vision_utils::DescriptorParamsLUCIDPtr des_params = std::make_shared<vision_utils::DescriptorParamsLUCID>();
std::string des_name = "LUCID"; std::string des_name = "LUCID";
...@@ -233,7 +233,7 @@ TEST(Descriptors, ORB) ...@@ -233,7 +233,7 @@ TEST(Descriptors, ORB)
vision_utils::DetectorBasePtr det_b_ptr = vision_utils::setupDetector(det_name, det_name + " detector", params); vision_utils::DetectorBasePtr det_b_ptr = vision_utils::setupDetector(det_name, det_name + " detector", params);
vision_utils::DetectorORBPtr det_ptr = std::static_pointer_cast<vision_utils::DetectorORB>(det_b_ptr); vision_utils::DetectorORBPtr det_ptr = std::static_pointer_cast<vision_utils::DetectorORB>(det_b_ptr);
std::vector<cv::KeyPoint> keypoints = det_ptr->detect(image); std::vector<cv::KeyPoint> keypoints = det_ptr->detect(image);
ASSERT_EQ(keypoints.size(), 17); ASSERT_EQ(keypoints.size(), 135);
vision_utils::DescriptorParamsORBPtr des_params = std::make_shared<vision_utils::DescriptorParamsORB>(); vision_utils::DescriptorParamsORBPtr des_params = std::make_shared<vision_utils::DescriptorParamsORB>();
std::string des_name = "ORB"; std::string des_name = "ORB";
......
...@@ -4,12 +4,19 @@ ...@@ -4,12 +4,19 @@
std::string sen_name = "USB_CAM"; std::string sen_name = "USB_CAM";
std::string vu_root = _VU_ROOT_DIR; std::string vu_root = _VU_ROOT_DIR;
std::string yaml_file_params = vu_root + "/src/test/data/base_classes.yaml";
int cam_num = 0; int cam_num = 0;
vision_utils::SensorBasePtr sen_b_ptr = vision_utils::setupSensor(sen_name, sen_name, yaml_file_params);
TEST(SensorUsbCam, constructor) TEST(SensorUsbCam, constructor)
{ {
vision_utils::IntrinsicsCameraPtr params = std::make_shared<vision_utils::IntrinsicsCamera>();
std::string sen_name = "USB_CAM";
vision_utils::SensorBasePtr sen_b_ptr = vision_utils::setupSensor(sen_name, sen_name, params);
#ifdef USING_YAML
std::string yaml_file_params = vu_root + "/src/test/data/base_classes.yaml";
sen_name = vision_utils::readYamlType(yaml_file_params, "sensor");
sen_b_ptr = vision_utils::setupSensor(sen_name, sen_name, yaml_file_params);
#endif
vision_utils::SensorCameraPtr sen_ptr = std::static_pointer_cast<vision_utils::SensorCamera>(sen_b_ptr); vision_utils::SensorCameraPtr sen_ptr = std::static_pointer_cast<vision_utils::SensorCamera>(sen_b_ptr);
ASSERT_TRUE(sen_ptr!=NULL); ASSERT_TRUE(sen_ptr!=NULL);
ASSERT_TRUE(sen_name.compare(sen_ptr->getName() ) == 0); ASSERT_TRUE(sen_name.compare(sen_ptr->getName() ) == 0);
...@@ -17,6 +24,14 @@ TEST(SensorUsbCam, constructor) ...@@ -17,6 +24,14 @@ TEST(SensorUsbCam, constructor)
TEST(SensorUsbCam, getImage) TEST(SensorUsbCam, getImage)
{ {
vision_utils::IntrinsicsCameraPtr params = std::make_shared<vision_utils::IntrinsicsCamera>();
std::string sen_name = "USB_CAM";
vision_utils::SensorBasePtr sen_b_ptr = vision_utils::setupSensor(sen_name, sen_name, params);
#ifdef USING_YAML
std::string yaml_file_params = vu_root + "/src/test/data/base_classes.yaml";
sen_name = vision_utils::readYamlType(yaml_file_params, "sensor");
sen_b_ptr = vision_utils::setupSensor(sen_name, sen_name, yaml_file_params);
#endif
vision_utils::SensorCameraPtr sen_ptr = std::static_pointer_cast<vision_utils::SensorCamera>(sen_b_ptr); vision_utils::SensorCameraPtr sen_ptr = std::static_pointer_cast<vision_utils::SensorCamera>(sen_b_ptr);
std::cout << " "; std::cout << " ";
if (sen_ptr->open(cam_num)) if (sen_ptr->open(cam_num))
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment