Commit 50c46bba authored by Joan Solà Ortega

Merge branch 'master' into jet_autodiff

Conflicts:
	src/examples/CMakeLists.txt
parents 60881f75 e0406590
Merge request: !111 Jet autodiff
......@@ -74,16 +74,24 @@ libgflags.a will be installed at **/usr/local/lib**
- Build and install with:
$ cd glog
$ ./configure --with-gflags=/usr/local/
$ make
$ sudo make install
$ ./autogen.sh
$ ./configure --with-gflags=/usr/local/
$ make
$ sudo make install
libglog.so will be installed at **/usr/local/lib**
- Troubleshooting:
If the `make` command fails with the error: `/bin/bash: aclocal-1.14: command not found`, install Glog with the following commands:
* ./autogen.sh fails with './autogen.sh: autoreconf: not found'
In a fresh installation you will probably need to install autoreconf by running
$ sudo apt-get install dh-autoreconf
* `make` command fails with the error: `/bin/bash: aclocal-1.14: command not found`
Install Glog with the following commands:
$ cd glog
$ sudo apt-get install autoconf
......@@ -300,6 +308,21 @@ At this point you might need to switch to the `catkin_build` branch of the wolf
**(6)** Run tests:
$ catkin run_tests
Troubleshooting
---------------
#### Boost
We have done our best to keep WOLF Boost-independent.
However, if you run into a Boost-related issue at execution time, check that you have Boost installed.
If needed, install [Boost](http://www.boost.org/) (free peer-reviewed portable C++ source libraries) with:
$ sudo apt-get install libboost-all-dev
Inspiring Links
---------------
......
......@@ -56,10 +56,18 @@ IF(raw_gps_utils_FOUND)
ENDIF(raw_gps_utils_FOUND)
# OpenCV
FIND_PACKAGE(OpenCV 2 QUIET)
IF(OpenCV_FOUND)
MESSAGE("OpenCV Library FOUND: OpenCV related sources will be built.")
ENDIF(OpenCV_FOUND)
FIND_PACKAGE(OpenCV QUIET)
if (OpenCV_FOUND)
if (${OpenCV_VERSION_MAJOR} GREATER 2)
message("-- Found OpenCV ${OpenCV_VERSION_MAJOR}.${OpenCV_VERSION_MINOR}. Related sources will be built.")
ADD_DEFINITIONS(-DHAVE_OPENCV3)
SET(USE_CV true)
else(${OpenCV_VERSION_MAJOR} GREATER 2)
message("[WARN] Found OpenCV ${OpenCV_VERSION_MAJOR}.${OpenCV_VERSION_MINOR}. Notice that WOLF is prepared to work with OpenCV 3.0 and higher versions.")
endif(${OpenCV_VERSION_MAJOR} GREATER 2)
else(OpenCV_FOUND)
message("[WARN] OpenCV not found. Related sources will NOT be built.")
endif(OpenCV_FOUND)
# YAML with yaml-cpp
INCLUDE (${PROJECT_SOURCE_DIR}/cmake_modules/FindYamlCpp.cmake)
......
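For reference, the `ADD_DEFINITIONS(-DHAVE_OPENCV3)` added above is the switch that every source file in this commit keys on to choose between the OpenCV 2 and OpenCV 3 APIs. A minimal sketch of that pattern, for illustration only (the `DetectorPtr` alias and the `makeOrb` helper are not part of the commit):

#include <memory>
#if defined (HAVE_OPENCV3)
#include <opencv2/features2d.hpp>                    // OpenCV 3: flattened headers
typedef cv::Ptr<cv::FeatureDetector> DetectorPtr;    // handles are obtained from factory functions
#else
#include <opencv2/features2d/features2d.hpp>         // OpenCV 2: per-module headers
typedef std::shared_ptr<cv::Feature2D> DetectorPtr;  // concrete classes wrapped in shared_ptr
#endif

DetectorPtr makeOrb(int nfeatures)
{
#if defined (HAVE_OPENCV3)
    return cv::ORB::create(nfeatures);               // OpenCV 3: create() returns a cv::Ptr
#else
    return std::make_shared<cv::ORB>(nfeatures);     // OpenCV 2: construct the detector directly
#endif
}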
......@@ -122,26 +122,24 @@ IF(OpenCV_FOUND)
ENDIF(Ceres_FOUND)
# Testing opencv matching and fundamental matrix with ransac
# ADD_EXECUTABLE(test_opencv test_opencv.cpp)
# TARGET_LINK_LIBRARIES(test_opencv ${PROJECT_NAME})
ADD_EXECUTABLE(test_opencv test_opencv.cpp)
TARGET_LINK_LIBRARIES(test_opencv ${PROJECT_NAME})
# Testing OpenCV functions for projection of points
# ADD_EXECUTABLE(test_projection_points test_projection_points.cpp)
# TARGET_LINK_LIBRARIES(test_projection_points ${PROJECT_NAME})
ADD_EXECUTABLE(test_projection_points test_projection_points.cpp)
TARGET_LINK_LIBRARIES(test_projection_points ${PROJECT_NAME})
# Constraint test
ADD_EXECUTABLE(test_constraint_AHP test_constraint_AHP.cpp)
TARGET_LINK_LIBRARIES(test_constraint_AHP ${PROJECT_NAME})
# ORB tracker test
# ADD_EXECUTABLE(test_tracker_ORB test_tracker_ORB.cpp)
# TARGET_LINK_LIBRARIES(test_tracker_ORB ${PROJECT_NAME})
ADD_EXECUTABLE(test_tracker_ORB test_tracker_ORB.cpp)
TARGET_LINK_LIBRARIES(test_tracker_ORB ${PROJECT_NAME})
# ORB tracker test
# ADD_EXECUTABLE(test_ROI_ORB test_ROI_ORB.cpp)
# TARGET_LINK_LIBRARIES(test_ROI_ORB ${PROJECT_NAME})
ADD_EXECUTABLE(test_ROI_ORB test_ROI_ORB.cpp)
TARGET_LINK_LIBRARIES(test_ROI_ORB ${PROJECT_NAME})
ENDIF(OpenCV_FOUND)
......
......@@ -14,7 +14,8 @@ detector-descriptor:
matcher:
minimum normalized score: 0.85
similarity norm: 6 # enum { NORM_INF=1, NORM_L1=2, NORM_L2=4, NORM_L2SQR=5, NORM_HAMMING=6, NORM_HAMMING2=7, NORM_TYPE_MASK=7, NORM_RELATIVE=8, NORM_MINMAX=32 };
similarity norm: 6 # OpenCV 3 enum { BruteForce=1 (it uses L2 ), BruteForce-L1=2, BruteForce-Hamming=3, BruteForce-Hamming(2)=4, FlannBased=5};
# OpenCV 2 enum { NORM_INF=1, NORM_L1=2, NORM_L2=4, NORM_L2SQR=5, NORM_HAMMING=6, NORM_HAMMING2=7, NORM_TYPE_MASK=7, NORM_RELATIVE=8, NORM_MINMAX=32 };
roi:
width: 20
height: 20
......
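A note on the `similarity norm` parameter above: under OpenCV 2 the number was passed straight to `cv::BFMatcher` as a `cv::NORM_*` constant, while under OpenCV 3 this commit maps it to a matcher name for `cv::DescriptorMatcher::create()`. A hedged sketch of that mapping, following the numbering in the OpenCV 3 comment above (the `makeMatcher` helper is illustrative, not code from this commit):

#include <string>
#if defined (HAVE_OPENCV3)
#include <opencv2/features2d.hpp>
#else
#include <opencv2/features2d/features2d.hpp>
#endif

cv::Ptr<cv::DescriptorMatcher> makeMatcher(int similarity_norm)
{
#if defined (HAVE_OPENCV3)
    // OpenCV 3: select the matcher by name, as listed in the YAML comment.
    std::string matcher_type = "BruteForce-Hamming";            // fallback; also covers the value 6 used above
    switch (similarity_norm)
    {
        case 1: matcher_type = "BruteForce";            break;  // uses L2 distance
        case 2: matcher_type = "BruteForce-L1";         break;
        case 3: matcher_type = "BruteForce-Hamming";    break;
        case 4: matcher_type = "BruteForce-Hamming(2)"; break;
        case 5: matcher_type = "FlannBased";            break;
    }
    return cv::DescriptorMatcher::create(matcher_type);
#else
    // OpenCV 2: the number is a cv::NORM_* constant (6 = NORM_HAMMING, as in the old comment).
    return cv::Ptr<cv::DescriptorMatcher>(new cv::BFMatcher(similarity_norm));
#endif
}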
//std includes
#include <iostream>
//opencv includes
#include "opencv2/features2d/features2d.hpp"
// OpenCV includes
#if defined (HAVE_OPENCV3)
#include <opencv2/features2d.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#else
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/core/core.hpp>
#endif
int main(int argc, char** argv)
{
......@@ -34,7 +41,7 @@ int main(int argc, char** argv)
unsigned int img_height = image.rows;
std::cout << "Image size: " << img_width << "x" << img_height << std::endl;
cv::Feature2D* detector_descriptor_ptr_;
cv::Ptr<cv::FeatureDetector> detector_descriptor_ptr_;
unsigned int nfeatures = 20;
float scaleFactor = 1.2;
......@@ -45,7 +52,7 @@ int main(int argc, char** argv)
unsigned int scoreType = 0; //#enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 };
unsigned int patchSize = 31;
detector_descriptor_ptr_ = new cv::ORB(nfeatures, //
detector_descriptor_ptr_ = cv::ORB::create(nfeatures, //
scaleFactor, //
nlevels, //
edgeThreshold, //
......
......@@ -9,12 +9,20 @@
// general includes
#include "unistd.h"
#include <time.h>
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/features2d/features2d.hpp"
// OpenCV includes
#if defined (HAVE_OPENCV3)
#include <opencv2/features2d.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include "opencv2/calib3d.hpp"
#else
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/core/core.hpp>
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#endif
// General includes
//#include <math.h>
......@@ -30,6 +38,7 @@ int main(int argc, char** argv)
// parsing input params
const char * filename;
cv::VideoCapture capture;
if (argc < 2)
{
std::cout << "Please use\n\t./test_opencv <arg> "
......@@ -41,16 +50,17 @@ int main(int argc, char** argv)
else if (std::string(argv[1]) == "0")
{
filename = "0"; // camera
capture.open(0);
std::cout << "Input stream from camera " << std::endl;
}
else
{
filename = argv[1]; // provided through argument
capture.open(filename);
std::cout << "Input video file: " << filename << std::endl;
}
// Open input stream
cv::VideoCapture capture(filename);
if (!capture.isOpened()) // check if we succeeded
std::cout << "failed" << std::endl;
else
......@@ -71,12 +81,8 @@ int main(int argc, char** argv)
cv::moveWindow("Feature tracker", 0, 0);
// set image processors
// cv::BRISK detector(30, 0, 1.0);
// cv::BRISK descriptor(30, 0, 1.0);
cv::BFMatcher matcher(cv::NORM_HAMMING2);
cv::ORB detector(1000,1.2,8,16,0,3,0,31);
cv::ORB descriptor(1000,1.2,8,16,0,3,0,31);
// cv::FlannBasedMatcher matcher;
cv::Ptr<cv::FeatureDetector> detector_descriptor_ptr = cv::ORB::create(1000,2,8,16,0,3,0,31);
cv::Ptr<cv::DescriptorMatcher> matcher_ptr = cv::DescriptorMatcher::create("BruteForce-Hamming(2)");
// declare all variables
std::vector<cv::KeyPoint> keypoints_1, keypoints_2;
......@@ -88,7 +94,6 @@ int main(int argc, char** argv)
std::vector<cv::Point2f> inliers_1, inliers_2;
cv::Mat img_matches;
cv::Mat img_scaled;
// double scale = 1;//0.875;
unsigned int f = 0;
capture >> image_buffer[f % buffer_size];
......@@ -116,10 +121,10 @@ int main(int argc, char** argv)
img_2 = image_buffer[(f - buffer_size + 1) % buffer_size];
// detect and describe in both images
detector.detect(img_1, keypoints_1);
detector.detect(img_2, keypoints_2);
descriptor.compute(img_1, keypoints_1, descriptors_1);
descriptor.compute(img_2, keypoints_2, descriptors_2);
detector_descriptor_ptr->detect(img_1, keypoints_1);
detector_descriptor_ptr->detect(img_2, keypoints_2);
detector_descriptor_ptr->compute(img_1, keypoints_1, descriptors_1);
detector_descriptor_ptr->compute(img_2, keypoints_2, descriptors_2);
unsigned int max_dist = 0;
unsigned int min_dist = 512;
......@@ -128,7 +133,7 @@ int main(int argc, char** argv)
{
// match (try flann later)
//-- Step 3: Matching descriptor vectors using FLANN matcher
matcher.match(descriptors_1, descriptors_2, matches);
matcher_ptr->match(descriptors_1, descriptors_2, matches);
//-- Quick calculation of max and min distances between keypoints
for (int i = 0; i < descriptors_1.rows; i++)
......@@ -180,10 +185,7 @@ int main(int argc, char** argv)
// Draw RANSAC inliers
cv::drawMatches(img_1, keypoints_1, img_2, keypoints_2, inlier_matches, img_matches,
cv::Scalar::all(-1), cv::Scalar::all(-1), std::vector<char>(), 0); //cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
//-- Show detected matches
// resize(img_matches, img_scaled, cv::Size(), scale, scale, cv::INTER_NEAREST);
// imshow("Feature tracker", img_1);
// imshow("Feature tracker", img_2);
// Show detected matches
imshow("Feature tracker", img_matches);
// pause every X images and wait for key
......
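The `inlier_matches` drawn at the end of the hunk above come from a RANSAC fit of the fundamental matrix, a step that falls between the hunks shown here. A sketch of one way to obtain such inliers with `cv::findFundamentalMat` (the `ransacInliers` helper is hypothetical, not the file's actual code):

#include <vector>
#if defined (HAVE_OPENCV3)
#include <opencv2/features2d.hpp>
#include <opencv2/calib3d.hpp>
#else
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#endif

std::vector<cv::DMatch> ransacInliers(const std::vector<cv::KeyPoint>& keypoints_1,
                                      const std::vector<cv::KeyPoint>& keypoints_2,
                                      const std::vector<cv::DMatch>& matches)
{
    std::vector<cv::DMatch> inlier_matches;
    if (matches.size() < 8)                      // the RANSAC variant needs at least 8 correspondences
        return inlier_matches;

    // Collect the matched pixel coordinates (descriptors_1 was the query set).
    std::vector<cv::Point2f> points_1, points_2;
    for (size_t i = 0; i < matches.size(); i++)
    {
        points_1.push_back(keypoints_1[matches[i].queryIdx].pt);
        points_2.push_back(keypoints_2[matches[i].trainIdx].pt);
    }

    // Fit a fundamental matrix with RANSAC; 'status' marks the inlier correspondences.
    std::vector<uchar> status;
    cv::findFundamentalMat(points_1, points_2, cv::FM_RANSAC, 3.0, 0.99, status);

    for (size_t i = 0; i < matches.size(); i++)
        if (status[i])
            inlier_matches.push_back(matches[i]);
    return inlier_matches;
}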
//std includes
#include <iostream>
//opencv includes
#include "opencv2/features2d/features2d.hpp"
// OpenCV includes
#if defined (HAVE_OPENCV3)
#include <opencv2/features2d.hpp>
#else
#include <opencv2/features2d/features2d.hpp>
#endif
#include "processor_image_landmark.h"
......@@ -12,40 +16,52 @@ int main(int argc, char** argv)
std::cout << std::endl << "==================== tracker ORB test ======================" << std::endl;
cv::VideoCapture capture;
const char * filename;
if (argc == 1)
{
// filename = "/home/jtarraso/Vídeos/gray.mp4";
filename = "/home/jtarraso/Imágenes/Test_ORB.png";
capture.open(filename);
}
else if (std::string(argv[1]) == "0")
{
//camera
filename = "0";
capture.open(0);
}
else
{
filename = argv[1];
capture.open(filename);
}
std::cout << "Input video file: " << filename << std::endl;
if(!capture.isOpened()) std::cout << "failed" << std::endl; else std::cout << "succeded" << std::endl;
capture.set(CV_CAP_PROP_POS_MSEC, 3000);
unsigned int img_width = capture.get(CV_CAP_PROP_FRAME_WIDTH);
unsigned int img_height = capture.get(CV_CAP_PROP_FRAME_HEIGHT);
std::cout << "Image size: " << img_width << "x" << img_height << std::endl;
cv::Feature2D* detector_descriptor_ptr_;
cv::DescriptorMatcher* matcher_ptr_;
// parsing input params
const char * filename;
cv::VideoCapture capture;
if (argc < 2)
{
std::cout << "Please use\n\t./test_opencv <arg> "
"\nwith "
"\n\targ = <path/videoname.mp4> for video input "
"\n\targ = 0 for camera input." << std::endl;
return 0;
}
else if (std::string(argv[1]) == "0")
{
filename = "0"; // camera
capture.open(0);
std::cout << "Input stream from camera " << std::endl;
}
else
{
filename = argv[1]; // provided through argument
capture.open(filename);
std::cout << "Input video file: " << filename << std::endl;
}
// Open input stream
if (!capture.isOpened()) // check if we succeeded
std::cout << "failed" << std::endl;
else
std::cout << "succeded" << std::endl;
// set and print image properties
unsigned int img_width = (unsigned int)capture.get(CV_CAP_PROP_FRAME_WIDTH);
unsigned int img_height = (unsigned int)capture.get(CV_CAP_PROP_FRAME_HEIGHT);
std::cout << "Image size: " << img_width << "x" << img_height << std::endl;
//=====================================================
// Environment variable for configuration files
std::string wolf_root = _WOLF_ROOT_DIR;
//=====================================================
//=====================================================
// Detector, descriptor and matcher
cv::Ptr<cv::FeatureDetector> detector_descriptor_ptr_;
cv::Ptr<cv::DescriptorMatcher> matcher_ptr_;
unsigned int nfeatures = 500;
float scaleFactor = 1.2;
float scaleFactor = 2;
unsigned int nlevels = 8;
unsigned int edgeThreshold = 16;
unsigned int firstLevel = 0;
......@@ -58,33 +74,21 @@ int main(int argc, char** argv)
unsigned int roi_width = 200;
unsigned int roi_heigth = 200;
detector_descriptor_ptr_ = new cv::ORB(nfeatures, //
detector_descriptor_ptr_ = cv::ORB::create(nfeatures, //
scaleFactor, //
nlevels, //
edgeThreshold, //
firstLevel, //
WTA_K, //
scoreType, //
patchSize);//,
// fastThreshold);
patchSize);//
//unsigned int nominal_pattern_radius = 0;
//unsigned int pattern_radius = (unsigned int)( (nominal_pattern_radius) * pow(scaleFactor, nlevels-1));
// std::cout << "nominal pattern radius: " << _dd_params->nominal_pattern_radius << std::endl;
// std::cout << "scale factor: " << params_orb->scaleFactor << std::endl;
// std::cout << "nlevels: " << params_orb->nlevels << std::endl;
unsigned int pattern_radius = (unsigned int)(patchSize);
unsigned int size_bits = detector_descriptor_ptr_->descriptorSize() * 8;
matcher_ptr_ = new cv::BFMatcher(6);
// CAPTURES
// SensorCamera* camera_ptr_;
// CaptureImage* image_ptr;
TimeStamp t = 1;
matcher_ptr_ = cv::DescriptorMatcher::create("BruteForce-Hamming(2)");
//=====================================================
unsigned int buffer_size = 20;
std::vector<cv::Mat> frame(buffer_size);
......@@ -94,10 +98,8 @@ int main(int argc, char** argv)
cv::namedWindow("Feature tracker"); // Creates a window for display.
cv::moveWindow("Feature tracker", 0, 0);
// image_ptr = new CaptureImage(t, camera_ptr_, frame[f % buffer_size]);
cv::imshow("Feature tracker", frame[f % buffer_size]);
cv::waitKey(0);
cv::waitKey(1);
std::vector<cv::KeyPoint> target_keypoints;
std::vector<cv::KeyPoint> tracked_keypoints_;
......@@ -121,8 +123,6 @@ int main(int argc, char** argv)
detector_descriptor_ptr_->detect(image_original, target_keypoints);
detector_descriptor_ptr_->compute(image_original, target_keypoints, target_descriptors);
while(!(frame[f % buffer_size].empty()))
{
f++;
......@@ -152,10 +152,10 @@ int main(int argc, char** argv)
roi_up_left_corner.y = roi.y;
//inflate
// roi.x = roi.x - pattern_radius;
// roi.y = roi.y - pattern_radius;
// roi.width = roi.width + 2*pattern_radius;
// roi.height = roi.height + 2*pattern_radius;
roi.x = roi.x - pattern_radius;
roi.y = roi.y - pattern_radius;
roi.width = roi.width + 2*pattern_radius;
roi.height = roi.height + 2*pattern_radius;
//trim
if(roi.x < 0)
......@@ -191,8 +191,6 @@ int main(int argc, char** argv)
detector_descriptor_ptr_->detect(image_roi, keypoints);
detector_descriptor_ptr_->compute(image_roi, keypoints, descriptors);
cv::Mat target_descriptor; //B(cv::Rect(0,0,vec_length,1));
target_descriptor = target_descriptors(cv::Rect(0,j,target_descriptors.cols,1));
......@@ -205,14 +203,12 @@ int main(int argc, char** argv)
if(normalized_score < 0.8)
{
std::cout << "not tracked" << std::endl;
// std::cout << "choosen_descriptor:\n" << descriptors.row(cv_matches[0].trainIdx) << std::endl;
}
else
{
std::cout << "tracked" << std::endl;
// std::cout << "choosen_descriptor:\n" << descriptors.row(cv_matches[0].trainIdx) << std::endl;
matched = true;
matched = true;
cv::Point2f point,t_point;
point.x = keypoints[cv_matches[0].trainIdx].pt.x + roi.x;
......@@ -220,11 +216,6 @@ int main(int argc, char** argv)
t_point.x = target_keypoints[j].pt.x;
t_point.y = target_keypoints[j].pt.y;
cv::circle(image_graphics, t_point, 4, cv::Scalar(51.0, 51.0, 255.0), -1, 3, 0);
cv::circle(image_graphics, point, 2, cv::Scalar(255.0, 255.0, 0.0), -1, 8, 0);
cv::putText(image_graphics, std::to_string(j), point, cv:: FONT_HERSHEY_SIMPLEX, 0.4, cv::Scalar(255.0, 255.0, 0.0));
......@@ -330,7 +321,7 @@ int main(int argc, char** argv)
tracked_keypoints = 0;
cv::imshow("Feature tracker", image_graphics);
cv::waitKey(0);
cv::waitKey(1);
// f++;
// capture >> frame[f % buffer_size];
......
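The ROI handling re-enabled above (the `//inflate` block) is followed by a `//trim` step whose body is cut off at the hunk boundary. The combined effect is to grow the search region by the descriptor pattern radius and then clamp it to the image; a compact, hypothetical equivalent (not the file's literal code) is:

#if defined (HAVE_OPENCV3)
#include <opencv2/core.hpp>
#else
#include <opencv2/core/core.hpp>
#endif

cv::Rect inflateAndTrim(cv::Rect roi, int pattern_radius, int img_width, int img_height)
{
    // Inflate: grow the ROI on every side so keypoints near its border
    // still have room for their full descriptor pattern.
    roi.x      -= pattern_radius;
    roi.y      -= pattern_radius;
    roi.width  += 2 * pattern_radius;
    roi.height += 2 * pattern_radius;

    // Trim: intersect with the full image rectangle, which is equivalent
    // to clamping x, y, width and height one by one.
    return roi & cv::Rect(0, 0, img_width, img_height);
}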
......@@ -10,6 +10,76 @@
namespace wolf
{
#if defined (HAVE_OPENCV3)
ProcessorImageFeature::ProcessorImageFeature(ProcessorParamsImage _params) :
ProcessorTrackerFeature("IMAGE", _params.algorithm.max_new_features),
params_(_params),
active_search_grid_()
{
// 1. detector-descriptor params
DetectorDescriptorParamsBasePtr _dd_params = _params.detector_descriptor_params_ptr;
switch (_dd_params->type){
case DD_BRISK:
{
std::shared_ptr<DetectorDescriptorParamsBrisk> params_brisk = std::static_pointer_cast<DetectorDescriptorParamsBrisk>(_dd_params);
detector_descriptor_ptr_ = cv::BRISK::create(params_brisk->threshold, //
params_brisk->octaves, //
params_brisk->pattern_scale);
detector_descriptor_params_.pattern_radius_ = std::max((unsigned int)((params_brisk->nominal_pattern_radius)*pow(2,params_brisk->octaves)),
(unsigned int)((params_brisk->nominal_pattern_radius)*params_brisk->pattern_scale));
detector_descriptor_params_.size_bits_ = detector_descriptor_ptr_->descriptorSize() * 8;
break;
}
case DD_ORB:
{
std::shared_ptr<DetectorDescriptorParamsOrb> params_orb = std::static_pointer_cast<DetectorDescriptorParamsOrb>(_dd_params);
detector_descriptor_ptr_ = cv::ORB::create(params_orb->nfeatures, //
params_orb->scaleFactor, //
params_orb->nlevels, //
params_orb->edgeThreshold, //
params_orb->firstLevel, //
params_orb->WTA_K, //
params_orb->scoreType, //
params_orb->patchSize);
detector_descriptor_params_.pattern_radius_ = params_orb->edgeThreshold;
detector_descriptor_params_.size_bits_ = detector_descriptor_ptr_->descriptorSize() * 8;
break;
}
default:
throw std::runtime_error("Unknown detector-descriptor type");
}
// 2. matcher params
// TODO: FIX this. Problems initializing with int (cv::DescriptorMatcher::create(int matcherType))
std::string matcherType = "BruteForce-Hamming"; // Default
switch (_params.matcher.similarity_norm)
{
case 1:
matcherType = "BruteForce";
break;
case 2:
matcherType = "BruteForce-L1";
break;
case 3:
matcherType = "BruteForce-Hamming";
break;
case 4:
matcherType = "BruteForce-Hamming(2)";
break;
case 5:
matcherType = "FlannBased";
break;
}
matcher_ptr_ = cv::DescriptorMatcher::create(matcherType);
}
#else
ProcessorImageFeature::ProcessorImageFeature(ProcessorParamsImage _params) :
ProcessorTrackerFeature("IMAGE", _params.algorithm.max_new_features),
matcher_ptr_(nullptr),
......@@ -56,9 +126,10 @@ ProcessorImageFeature::ProcessorImageFeature(ProcessorParamsImage _params) :
// 2. matcher params
matcher_ptr_ = std::make_shared<cv::BFMatcher>(_params.matcher.similarity_norm);
}
#endif
//Destructor
ProcessorImageFeature::~ProcessorImageFeature()
{
......
......@@ -11,29 +11,38 @@
#include "constraint_epipolar.h"
#include "processor_image_params.h"
// OpenCV includes
#include "opencv2/features2d/features2d.hpp"
#if defined (HAVE_OPENCV3)
#include <opencv2/features2d.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#else
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/core/core.hpp>
#endif
// General includes
#include <cmath>
#include <complex> // std::complex, std::norm
namespace wolf
{
namespace wolf {
WOLF_PTR_TYPEDEFS(ProcessorImageFeature);
//class
class ProcessorImageFeature : public ProcessorTrackerFeature
{
protected:
#if defined (HAVE_OPENCV3)
cv::Ptr<cv::DescriptorMatcher> matcher_ptr_;
cv::Ptr<cv::FeatureDetector> detector_descriptor_ptr_;
#else
std::shared_ptr<cv::DescriptorMatcher> matcher_ptr_;
std::shared_ptr<cv::Feature2D> detector_descriptor_ptr_;
#endif
protected:
ProcessorParamsImage params_; // Struct with parameters of the processors
......@@ -43,12 +52,12 @@ class ProcessorImageFeature : public ProcessorTrackerFeature
{
unsigned int pattern_radius_; ///< radius of the pattern used to detect a key-point at pattern_scale = 1.0 and octaves = 0
unsigned int size_bits_; ///< length of the descriptor vector in bits
}detector_descriptor_params_;
} detector_descriptor_params_;
struct
{
unsigned int width_; ///< width of the image
unsigned int height_; ///< height of the image
}image_;
} image_;
// Lists to store values to debug
std::list<cv::Rect> tracker_roi_;
......@@ -98,7 +107,8 @@ class ProcessorImageFeature : public ProcessorTrackerFeature
* \param _incoming_feature input/output feature in incoming capture to be corrected
* \return false if the the process discards the correspondence with origin's feature
*/
virtual bool correctFeatureDrift(const FeatureBasePtr _origin_feature, const FeatureBasePtr _last_feature, FeatureBasePtr _incoming_feature);
virtual bool correctFeatureDrift(const FeatureBasePtr _origin_feature, const FeatureBasePtr _last_feature,
FeatureBasePtr _incoming_feature);
/** \brief Vote for KeyFrame generation
*
......@@ -132,7 +142,7 @@ class ProcessorImageFeature : public ProcessorTrackerFeature
* \return the number of detected features
*/
virtual unsigned int detect(cv::Mat _image, cv::Rect& _roi, std::vector<cv::KeyPoint>& _new_keypoints,
cv::Mat& new_descriptors);
cv::Mat& new_descriptors);
private:
/**
......@@ -162,8 +172,8 @@ class ProcessorImageFeature : public ProcessorTrackerFeature
* \param _cv_matches output variable in which the best result will be stored (in the position [0])
* \return normalized score of similarity (1 - exact match; 0 - complete mismatch)
*/
virtual Scalar match(cv::Mat _target_descriptor, cv::Mat _candidate_descriptors, std::vector<cv::DMatch>& _cv_matches);
virtual Scalar match(cv::Mat _target_descriptor, cv::Mat _candidate_descriptors,
std::vector<cv::DMatch>& _cv_matches);
// These only to debug, will disappear one day
public:
......@@ -173,8 +183,8 @@ class ProcessorImageFeature : public ProcessorTrackerFeature
virtual void resetVisualizationFlag(FeatureBaseList& _feature_list_last);
public:
static ProcessorBasePtr create(const std::string& _unique_name, const ProcessorParamsBasePtr _params, const SensorBasePtr sensor_ptr = nullptr);
static ProcessorBasePtr create(const std::string& _unique_name, const ProcessorParamsBasePtr _params,
const SensorBasePtr sensor_ptr = nullptr);
};
......@@ -183,9 +193,11 @@ inline bool ProcessorImageFeature::voteForKeyFrame()
return (incoming_ptr_->getFeatureList().size() < params_.algorithm.min_features_for_keyframe);
}
inline ConstraintBasePtr ProcessorImageFeature::createConstraint(FeatureBasePtr _feature_ptr, FeatureBasePtr _feature_other_ptr)
inline ConstraintBasePtr ProcessorImageFeature::createConstraint(FeatureBasePtr _feature_ptr,
FeatureBasePtr _feature_other_ptr)
{
ConstraintEpipolarPtr const_epipolar_ptr = std::make_shared<ConstraintEpipolar>(_feature_ptr, _feature_other_ptr, shared_from_this());
ConstraintEpipolarPtr const_epipolar_ptr =
        std::make_shared<ConstraintEpipolar>(_feature_ptr, _feature_other_ptr, shared_from_this());
// _feature_ptr->addConstraint(const_epipolar_ptr);
// _feature_other_ptr->addConstrainedBy(const_epipolar_ptr);
return const_epipolar_ptr;
......@@ -193,5 +205,4 @@ inline ConstraintBasePtr ProcessorImageFeature::createConstraint(FeatureBasePtr
} // namespace wolf
#endif // PROCESSOR_IMAGE_FEATURE_H
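The `match()` method declared above returns a normalized similarity score (1 = exact match, 0 = complete mismatch), the quantity compared against `minimum normalized score` in the YAML configuration and against the 0.8 threshold in the ORB tracker test. A hedged sketch of such a score for binary descriptors, assuming the best Hamming distance is normalized by the descriptor length in bits (`size_bits_`); this is an illustration, not necessarily the exact formula used by `ProcessorImageFeature::match()`:

#include <vector>
#if defined (HAVE_OPENCV3)
#include <opencv2/features2d.hpp>
#else
#include <opencv2/features2d/features2d.hpp>
#endif

// Hypothetical free function mirroring the documented interface of match().
double normalizedScore(cv::DescriptorMatcher& matcher,
                       const cv::Mat& target_descriptor,      // a single descriptor row
                       const cv::Mat& candidate_descriptors,
                       unsigned int size_bits,                // descriptor length in bits
                       std::vector<cv::DMatch>& cv_matches)
{
    matcher.match(target_descriptor, candidate_descriptors, cv_matches);
    if (cv_matches.empty())
        return 0.0;
    // The best result is stored at position [0]; a Hamming distance is bounded
    // by size_bits, so dividing by it gives a score in [0, 1].
    return 1.0 - cv_matches[0].distance / (double)size_bits;
}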
......@@ -14,6 +14,79 @@
namespace wolf
{
#if defined (HAVE_OPENCV3)
ProcessorImageLandmark::ProcessorImageLandmark(const ProcessorParamsImage& _params) :
ProcessorTrackerLandmark("IMAGE LANDMARK", _params.algorithm.max_new_features, _params.algorithm.time_tolerance),
params_(_params),
active_search_grid_(),
n_feature_(0),
landmark_idx_non_visible_(0)
{
// 1. detector-descriptor params
std::shared_ptr<DetectorDescriptorParamsBase> _dd_params = _params.detector_descriptor_params_ptr;
switch (_dd_params->type){
case DD_BRISK:
{
std::shared_ptr<DetectorDescriptorParamsBrisk> params_brisk = std::static_pointer_cast<DetectorDescriptorParamsBrisk>(_dd_params);
detector_descriptor_ptr_ = cv::BRISK::create(params_brisk->threshold, //
params_brisk->octaves, //
params_brisk->pattern_scale);
detector_descriptor_params_.pattern_radius_ = std::max((unsigned int)((params_brisk->nominal_pattern_radius)*pow(2,params_brisk->octaves)),
(unsigned int)((params_brisk->nominal_pattern_radius)*params_brisk->pattern_scale));
detector_descriptor_params_.size_bits_ = detector_descriptor_ptr_->descriptorSize() * 8;
break;
}
case DD_ORB:
{
std::shared_ptr<DetectorDescriptorParamsOrb> params_orb = std::static_pointer_cast<DetectorDescriptorParamsOrb>(_dd_params);
detector_descriptor_ptr_ = cv::ORB::create(params_orb->nfeatures, //
params_orb->scaleFactor, //
params_orb->nlevels, //
params_orb->edgeThreshold, //
params_orb->firstLevel, //
params_orb->WTA_K, //
params_orb->scoreType, //
params_orb->patchSize);
detector_descriptor_params_.pattern_radius_ = params_orb->edgeThreshold;
detector_descriptor_params_.size_bits_ = detector_descriptor_ptr_->descriptorSize() * 8;
break;
}
default:
throw std::runtime_error("Unknown detector-descriptor type");
}
// 2. matcher params
// TODO: FIX this. Problems initializing with int (cv::DescriptorMatcher::create(int matcherType))
std::string matcherType = "BruteForce-Hamming"; // Default
switch (_params.matcher.similarity_norm)
{
case 1:
matcherType = "FlannBased";
break;
case 2:
matcherType = "BruteForce";
break;
case 3:
matcherType = "BruteForce-L1";
break;
case 4:
matcherType = "BruteForce-Hamming";
break;
case 5:
matcherType = "BruteForce-Hamming(2)";
break;
}
matcher_ptr_ = cv::DescriptorMatcher::create(matcherType);
}
#else
ProcessorImageLandmark::ProcessorImageLandmark(const ProcessorParamsImage& _params) :
ProcessorTrackerLandmark("IMAGE LANDMARK", _params.algorithm.max_new_features, _params.algorithm.time_tolerance),
......@@ -64,9 +137,10 @@ ProcessorImageLandmark::ProcessorImageLandmark(const ProcessorParamsImage& _para
// 2. matcher params
matcher_ptr_ = std::make_shared<cv::BFMatcher>(_params.matcher.similarity_norm);
}
#endif
ProcessorImageLandmark::~ProcessorImageLandmark()
{
//
......
......@@ -14,10 +14,16 @@
#include "constraint_AHP.h"
// OpenCV includes
#include "opencv2/features2d/features2d.hpp"
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/core/core.hpp>
#if defined (HAVE_OPENCV3)
#include <opencv2/features2d.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#else
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/core/core.hpp>
#endif
// General includes
#include <cmath>
......@@ -33,8 +39,15 @@ WOLF_PTR_TYPEDEFS(ProcessorImageLandmark);
class ProcessorImageLandmark : public ProcessorTrackerLandmark
{
protected:
#if defined (HAVE_OPENCV3)
cv::Ptr<cv::DescriptorMatcher> matcher_ptr_;
cv::Ptr<cv::FeatureDetector> detector_descriptor_ptr_;
#else
std::shared_ptr<cv::DescriptorMatcher> matcher_ptr_;
std::shared_ptr<cv::Feature2D> detector_descriptor_ptr_;
#endif
protected:
ProcessorParamsImage params_; // Struct with parameters of the processors
ActiveSearchGrid active_search_grid_; // Active Search
......
......@@ -12,9 +12,16 @@
#include "logging.h"
//opencv includes
#include "opencv2/features2d/features2d.hpp"
#if defined (HAVE_OPENCV3)
#include <opencv2/features2d.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#else
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/core/core.hpp>
#endif
// std include
#include <vector>
......@@ -117,14 +124,25 @@ TEST(RoiORB, RoiBounds)
unsigned int scoreType = 0; //#enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 };
unsigned int patchSize = 31;
cv::ORB detector(nfeatures, //
scaleFactor, //
nlevels, //
edgeThreshold, //
firstLevel, //
WTA_K, //
scoreType, //
patchSize);
#if defined (HAVE_OPENCV3)
cv::Ptr<cv::FeatureDetector> detector = cv::ORB::create(nfeatures, //
scaleFactor, //
nlevels, //
edgeThreshold, //
firstLevel, //
WTA_K, //
scoreType, //
patchSize);
#else
std::shared_ptr<cv::ORB> detector = std::make_shared<cv::ORB>(nfeatures, //
scaleFactor, //
nlevels, //
edgeThreshold, //
firstLevel, //
WTA_K, //
scoreType, //
patchSize);
#endif
std::vector<cv::KeyPoint> target_keypoints;
cv::KeyPointsFilter keypoint_filter;
......@@ -177,7 +195,7 @@ TEST(RoiORB, RoiBounds)
cv::Mat image_roi = image(roi_inflated);
target_keypoints.clear();
detector.detect(image_roi, target_keypoints);
detector->detect(image_roi, target_keypoints);
if (!target_keypoints.empty())
{
......