Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found

Target

Select target project
  • mobile_robotics/wolf_projects/wolf_lib/plugins/vision
1 result
Show changes
Commits on Source (25)
...@@ -101,13 +101,9 @@ IF(EXISTS "${WOLF_CONFIG_DIR}" AND NOT IS_DIRECTORY "${WOLF_CONFIG_DIR}") ...@@ -101,13 +101,9 @@ IF(EXISTS "${WOLF_CONFIG_DIR}" AND NOT IS_DIRECTORY "${WOLF_CONFIG_DIR}")
message(FATAL_ERROR "Bug: Specified CONFIG_DIR: " message(FATAL_ERROR "Bug: Specified CONFIG_DIR: "
"${WOLF_CONFIG_DIR} exists, but is not a directory.") "${WOLF_CONFIG_DIR} exists, but is not a directory.")
ENDIF() ENDIF()
# Configure config.h # Configure config.h
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/internal/config.h.in "${WOLF_CONFIG_DIR}/config.h") configure_file(${CMAKE_CURRENT_SOURCE_DIR}/internal/config.h.in "${WOLF_CONFIG_DIR}/config.h")
message("CONFIG DIRECTORY ${PROJECT_BINARY_DIR}")
include_directories("${PROJECT_BINARY_DIR}/conf")
# ============ INCLUDES ==================
INCLUDE_DIRECTORIES(BEFORE "include")
# ============ HEADERS ============ # ============ HEADERS ============
SET(HDRS_CAPTURE SET(HDRS_CAPTURE
...@@ -239,9 +235,9 @@ install( ...@@ -239,9 +235,9 @@ install(
${LIB_INSTALL_DIR}/${PLUGIN_NAME}/cmake ${LIB_INSTALL_DIR}/${PLUGIN_NAME}/cmake
) )
# Specifies include directories to use when compiling the plugin target target_include_directories(${PLUGIN_NAME} PUBLIC
# This way, include_directories does not need to be called in plugins depending on this one $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
target_include_directories(${PLUGIN_NAME} INTERFACE $<BUILD_INTERFACE:${PROJECT_BINARY_DIR}/conf>
$<INSTALL_INTERFACE:${INCLUDE_INSTALL_DIR}> $<INSTALL_INTERFACE:${INCLUDE_INSTALL_DIR}>
) )
......
...@@ -67,5 +67,9 @@ max_nb_tracks: 100 ...@@ -67,5 +67,9 @@ max_nb_tracks: 100
# standard deviation of the pixel reprojection factor # standard deviation of the pixel reprojection factor
std_pix: 1 std_pix: 1
# before creating a landmark, wait until the track is old enough # Rules to create a new landmark from a track
min_track_length_for_landmark: 3 lmk_creation:
# wait until the track is old enough
min_track_length_for_landmark: 3
# enough pixel distance
min_pixel_dist: 0.0
...@@ -53,6 +53,7 @@ class WKeyPoint ...@@ -53,6 +53,7 @@ class WKeyPoint
WKeyPoint(); WKeyPoint();
WKeyPoint(const cv::KeyPoint& _cv_kp); WKeyPoint(const cv::KeyPoint& _cv_kp);
WKeyPoint(const Eigen::Vector2d& _eig_kp);
size_t getId() const {return id_;} size_t getId() const {return id_;}
void setId(size_t _id) {id_ = _id;} void setId(size_t _id) {id_ = _id;}
...@@ -75,7 +76,7 @@ class WKeyPoint ...@@ -75,7 +76,7 @@ class WKeyPoint
// ID in a frame // ID in a frame
typedef std::unordered_map<size_t, WKeyPoint> KeyPointsMap; typedef std::unordered_map<size_t, WKeyPoint> KeyPointsMap;
// This maps the IDs of the Wolf KeyPoints that are tracked from a frame to the other. // Map the IDs of the Wolf KeyPoints that are tracked from a frame to the other.
// It takes the ID of a WKeyPoint and returns the ID of its track in another Frame. // It takes the ID of a WKeyPoint and returns the ID of its track in another Frame.
typedef std::unordered_map<size_t, size_t> TracksMap; typedef std::unordered_map<size_t, size_t> TracksMap;
......
...@@ -32,7 +32,7 @@ ...@@ -32,7 +32,7 @@
* *
*/ */
#include "core/common/wolf.h" #include <core/common/wolf.h>
#include <iostream> #include <iostream>
......
...@@ -25,6 +25,7 @@ ...@@ -25,6 +25,7 @@
// wolf plugin includes // wolf plugin includes
#include "vision/internal/config.h"
#include "vision/math/pinhole_tools.h" #include "vision/math/pinhole_tools.h"
#include "vision/sensor/sensor_camera.h" #include "vision/sensor/sensor_camera.h"
#include "vision/capture/capture_image.h" #include "vision/capture/capture_image.h"
...@@ -35,23 +36,37 @@ ...@@ -35,23 +36,37 @@
// wolf includes // wolf includes
#include <core/math/rotations.h> #include <core/math/rotations.h>
#include <core/math/SE3.h>
#include <core/state_block/state_composite.h>
#include <core/processor/processor_tracker.h> #include <core/processor/processor_tracker.h>
#include <core/processor/track_matrix.h> #include <core/processor/track_matrix.h>
#include <core/processor/motion_provider.h>
// Opencv includes // Opencv includes
#include <opencv2/core.hpp> #include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/imgcodecs.hpp> #include <opencv2/imgcodecs.hpp>
#include <opencv2/features2d.hpp> #include <opencv2/features2d.hpp>
#include <opencv2/calib3d/calib3d.hpp> #include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/video/tracking.hpp> #include <opencv2/video/tracking.hpp>
#include <opencv2/core/eigen.hpp>
namespace wolf{ namespace wolf{
/** \brief Buffer of VectorComposite
*
* Object and functions to manage a buffer of VectorComposite objects.
* Used to hold a memory of origin->last relative poses.
*/
class BufferVectorCompositePtr : public Buffer<std::shared_ptr<VectorComposite>> { };
WOLF_STRUCT_PTR_TYPEDEFS(ParamsProcessorVisualOdometry); WOLF_STRUCT_PTR_TYPEDEFS(ParamsProcessorVisualOdometry);
struct ParamsProcessorVisualOdometry : public ParamsProcessorTracker struct ParamsProcessorVisualOdometry : public ParamsProcessorTracker, public ParamsMotionProvider
{ {
struct RansacParams struct RansacParams
{ {
...@@ -97,17 +112,24 @@ struct ParamsProcessorVisualOdometry : public ParamsProcessorTracker ...@@ -97,17 +112,24 @@ struct ParamsProcessorVisualOdometry : public ParamsProcessorTracker
} clahe; } clahe;
}; };
struct LandmarkCreationParams
{
unsigned int min_track_length_for_landmark;
double min_pixel_dist;
};
RansacParams ransac; RansacParams ransac;
KltParams klt; KltParams klt;
FastParams fast; FastParams fast;
GridParams grid; GridParams grid;
EqualizationParams equalization; EqualizationParams equalization;
LandmarkCreationParams lmk_creation;
double std_pix; double std_pix;
unsigned int min_track_length_for_landmark;
ParamsProcessorVisualOdometry() = default; ParamsProcessorVisualOdometry() = default;
ParamsProcessorVisualOdometry(std::string _unique_name, const ParamsServer& _server): ParamsProcessorVisualOdometry(std::string _unique_name, const ParamsServer& _server):
ParamsProcessorTracker(_unique_name, _server) ParamsProcessorTracker(_unique_name, _server),
ParamsMotionProvider(_unique_name, _server)
{ {
std_pix = _server.getParam<double>(prefix + _unique_name + "/std_pix"); std_pix = _server.getParam<double>(prefix + _unique_name + "/std_pix");
...@@ -146,7 +168,9 @@ struct ParamsProcessorVisualOdometry : public ParamsProcessorTracker ...@@ -146,7 +168,9 @@ struct ParamsProcessorVisualOdometry : public ParamsProcessorTracker
grid.margin = _server.getParam<unsigned int>(prefix + _unique_name + "/grid/margin"); grid.margin = _server.getParam<unsigned int>(prefix + _unique_name + "/grid/margin");
grid.separation = _server.getParam<unsigned int>(prefix + _unique_name + "/grid/separation"); grid.separation = _server.getParam<unsigned int>(prefix + _unique_name + "/grid/separation");
min_track_length_for_landmark = _server.getParam<unsigned int>(prefix + _unique_name + "/min_track_length_for_landmark"); lmk_creation.min_track_length_for_landmark = _server.getParam<unsigned int>(prefix + _unique_name + "/lmk_creation/min_track_length_for_landmark");
lmk_creation.min_pixel_dist = _server.getParam<double>(prefix + _unique_name + "/lmk_creation/min_pixel_dist");
} }
std::string print() const override std::string print() const override
...@@ -165,18 +189,25 @@ struct ParamsProcessorVisualOdometry : public ParamsProcessorTracker ...@@ -165,18 +189,25 @@ struct ParamsProcessorVisualOdometry : public ParamsProcessorTracker
+ "grid.nbr_cells_v: " + std::to_string(grid.nbr_cells_v) + "\n" + "grid.nbr_cells_v: " + std::to_string(grid.nbr_cells_v) + "\n"
+ "grid.margin: " + std::to_string(grid.margin) + "\n" + "grid.margin: " + std::to_string(grid.margin) + "\n"
+ "grid.separation: " + std::to_string(grid.separation) + "\n" + "grid.separation: " + std::to_string(grid.separation) + "\n"
+ "min_track_length_for_landmark: " + std::to_string(min_track_length_for_landmark) + "\n"; + "lmk_creation.min_track_length_for_landmark: " + std::to_string(lmk_creation.min_track_length_for_landmark) + "\n"
+ "lmk_creation.min_pixel_dist: " + std::to_string(lmk_creation.min_pixel_dist) + "\n";
} }
}; };
WOLF_PTR_TYPEDEFS(ProcessorVisualOdometry); WOLF_PTR_TYPEDEFS(ProcessorVisualOdometry);
class ProcessorVisualOdometry : public ProcessorTracker class ProcessorVisualOdometry : public ProcessorTracker, public MotionProvider
{ {
public: public:
ProcessorVisualOdometry(ParamsProcessorVisualOdometryPtr _params_visual_odometry); ProcessorVisualOdometry(ParamsProcessorVisualOdometryPtr _params_visual_odometry);
virtual ~ProcessorVisualOdometry() override {}; virtual ~ProcessorVisualOdometry() override {};
// MotionProvider class pure virtual methods
VectorComposite getState(const StateStructure& _structure = "") const override;
TimeStamp getTimeStamp( ) const override;
VectorComposite getState(const TimeStamp& _ts, const StateStructure& _structure = "") const override;
VectorComposite getRelativePoseOriginLast() const;
WOLF_PROCESSOR_CREATE(ProcessorVisualOdometry, ParamsProcessorVisualOdometry); WOLF_PROCESSOR_CREATE(ProcessorVisualOdometry, ParamsProcessorVisualOdometry);
protected: protected:
...@@ -204,6 +235,9 @@ class ProcessorVisualOdometry : public ProcessorTracker ...@@ -204,6 +235,9 @@ class ProcessorVisualOdometry : public ProcessorTracker
// bookeeping // bookeeping
TracksMap tracks_map_li_matched_; TracksMap tracks_map_li_matched_;
// buffer of origin->last camera relative poses mapping origin to last
BufferVectorCompositePtr buffer_pose_cam_ol_;
public: public:
...@@ -250,9 +284,32 @@ class ProcessorVisualOdometry : public ProcessorTracker ...@@ -250,9 +284,32 @@ class ProcessorVisualOdometry : public ProcessorTracker
* \brief Emplace a landmark corresponding to a track and initialize it with triangulation. * \brief Emplace a landmark corresponding to a track and initialize it with triangulation.
* \param _feature_ptr a pointer to the feature used to create the new landmark * \param _feature_ptr a pointer to the feature used to create the new landmark
* \return a pointer to the created landmark * \return a pointer to the created landmark
*
     * Implementation: Use rays of feature detections in the last frame and create a landmark at 1 meter (arbitrary)
*/
LandmarkBasePtr emplaceLandmarkNaive(FeaturePointImagePtr _feature_ptr);
/**
* \brief Emplace a landmark corresponding to a track and initialize it with triangulation.
* \param _feature_ptr a pointer to the feature used to create the new landmark
     * \param _track_kf track with only features associated to keyframes that may be used for initialisation
* \return a pointer to the created landmark. If null, the triangulation failed due to low parallax
*/
LandmarkBasePtr emplaceLandmarkTriangulation(FeaturePointImagePtr _feature_ptr, Track _track_kf);
/**
     * \brief Try to triangulate a new landmark from the features of a track.
* \param _feature_ptr a pointer to the feature used to create the new landmark
     * \param _track_kf track with only features associated to keyframes that may be used for initialisation
* \return the triangulated point in P3 homogeneous coordinates
*
* Implementation: try to triangulate a new landmark based on previous frames estimates.
     * Apply a numerical test to assess if parallax is enough.
*/ */
LandmarkBasePtr emplaceLandmark(FeatureBasePtr _feature_ptr); bool tryTriangulationFromFeatures(FeaturePointImagePtr _feat, Track _track_kf, Eigen::Vector4d&);
Eigen::Isometry3d getTcw(TimeStamp _ts) const;
/** \brief Advance the incoming Capture to become the last. /** \brief Advance the incoming Capture to become the last.
* *
...@@ -277,19 +334,60 @@ class ProcessorVisualOdometry : public ProcessorTracker ...@@ -277,19 +334,60 @@ class ProcessorVisualOdometry : public ProcessorTracker
/** \brief Remove outliers from the tracks map with a RANSAC 5-points algorithm implemented on openCV /** \brief Remove outliers from the tracks map with a RANSAC 5-points algorithm implemented on openCV
*/ */
bool filterWithEssential(const KeyPointsMap mwkps_prev, const KeyPointsMap mwkps_curr, TracksMap &tracks_prev_curr, cv::Mat &E); bool filterWithEssential(const KeyPointsMap mwkps_prev,
const KeyPointsMap mwkps_curr,
/** \brief Tool to merge tracks TracksMap &tracks_prev_curr,
cv::Mat &E,
VectorComposite &_pose_prev_curr);
/** \brief Merge track maps between moments prev->curr and curr->next to give a track map between prev->next.
*
* \param tracks_prev_curr prev->curr track map
* \param tracks_curr_next curr->next track map
* \return merged track prev->next
*/ */
static TracksMap mergeTracks(const TracksMap& tracks_prev_curr, const TracksMap& tracks_curr_next); static TracksMap mergeTracks(const TracksMap& tracks_prev_curr, const TracksMap& tracks_curr_next);
void setParams(const ParamsProcessorVisualOdometryPtr _params);
const TrackMatrix& getTrackMatrix() const {return track_matrix_;} const TrackMatrix& getTrackMatrix() const {return track_matrix_;}
///////////////////////////////////////
// MotionProvider related methods
VectorComposite getStateFromRelativeOriginLast(VectorComposite co_pose_cl) const;
static VectorComposite pose_from_essential_matrix(const cv::Mat& _E,
const std::vector<cv::Point2d>& p2d_prev,
const std::vector<cv::Point2d>& p2d_curr,
const cv::Mat& _K,
cv::Mat& cvMask);
///////////////////////////////////////
/** \brief sequence of heuristics to decide if a track is worthy of becoming a landmark
*
*/
bool new_landmark_is_viable(int track_id);
private: private:
void retainBest(std::vector<cv::KeyPoint> &_keypoints, int n); void retainBest(std::vector<cv::KeyPoint> &_keypoints, int n);
/* Equalize image for better detection and tracking
* available methods:
* 0. none
* 1. average
* 2. opencv: histogram_equalization
* 3. opencv: CLAHE
*/
void equalize_img(cv::Mat &img_incoming);
void detect_keypoints_empty_grid(cv::Mat img_incoming, CaptureImagePtr capture_image_incoming);
void filter_last_incoming_tracks_from_ransac_result(const TracksMap& tracks_last_incoming, const KeyPointsMap& mwkps_incoming, const TracksMap& tracks_origin_incoming,
TracksMap& tracks_last_incoming_filtered, KeyPointsMap& mwkps_incoming_filtered);
void detect_keypoints_in_empty_grid_cells(cv::Mat img_last, const TracksMap& tracks_last_incoming_filtered, const KeyPointsMap& mwkps_last,
std::vector<cv::KeyPoint>& kps_last_new, KeyPointsMap& mwkps_last_filtered);
}; };
} //namespace wolf } //namespace wolf
......
...@@ -39,7 +39,15 @@ WKeyPoint::WKeyPoint(const cv::KeyPoint& _cv_kp): ...@@ -39,7 +39,15 @@ WKeyPoint::WKeyPoint(const cv::KeyPoint& _cv_kp):
{ {
} }
WKeyPoint::WKeyPoint(const Eigen::Vector2d& _eig_kp):
id_(++id_count_)
{
cv_kp_.pt = cv::Point2f(_eig_kp(0), _eig_kp(1));
}
/////////////////
CaptureImage::CaptureImage(const TimeStamp& _ts, SensorCameraPtr _camera_ptr, const cv::Mat& _img) : CaptureImage::CaptureImage(const TimeStamp& _ts, SensorCameraPtr _camera_ptr, const cv::Mat& _img) :
CaptureBase("CaptureImage", _ts, _camera_ptr), CaptureBase("CaptureImage", _ts, _camera_ptr),
img_(_img), img_(_img),
......
...@@ -60,7 +60,7 @@ class CaptureImage_test : public testing::Test ...@@ -60,7 +60,7 @@ class CaptureImage_test : public testing::Test
cv_kp2_ = cv::KeyPoint(2.0, 0.0, 0); cv_kp2_ = cv::KeyPoint(2.0, 0.0, 0);
wkp0_ = WKeyPoint(cv_kp0_); wkp0_ = WKeyPoint(cv_kp0_);
wkp1_ = WKeyPoint (cv_kp1_); wkp1_ = WKeyPoint (cv_kp1_);
wkp2_ = WKeyPoint (cv_kp2_); wkp2_ = WKeyPoint (Eigen::Vector2d(cv_kp2_.pt.x, cv_kp2_.pt.y));
} }
}; };
......
...@@ -27,12 +27,17 @@ ...@@ -27,12 +27,17 @@
*/ */
#include <string> #include <string>
#include <Eigen/Dense>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/core/eigen.hpp>
#include <core/utils/utils_gtest.h> #include <core/utils/utils_gtest.h>
#include <core/sensor/sensor_base.h> #include <core/sensor/sensor_base.h>
#include <core/yaml/parser_yaml.h> #include <core/yaml/parser_yaml.h>
#include <opencv2/imgcodecs.hpp> #include <core/math/rotations.h>
#include <core/processor/track_matrix.h>
#include "core/math/rotations.h"
#include "vision/processor/processor_visual_odometry.h" #include "vision/processor/processor_visual_odometry.h"
#include "vision/sensor/sensor_camera.h" #include "vision/sensor/sensor_camera.h"
...@@ -41,16 +46,18 @@ ...@@ -41,16 +46,18 @@
#include "vision/capture/capture_image.h" #include "vision/capture/capture_image.h"
#include "vision/internal/config.h" #include "vision/internal/config.h"
using namespace wolf; using namespace wolf;
using namespace cv; using namespace cv;
std::string wolf_vision_root = _WOLF_VISION_ROOT_DIR; std::string wolf_vision_root = _WOLF_VISION_ROOT_DIR;
class ProcessorVisualOdometryTest : public ProcessorVisualOdometry WOLF_PTR_TYPEDEFS(ProcessorVisualOdometry_Wrapper);
class ProcessorVisualOdometry_Wrapper : public ProcessorVisualOdometry
{ {
public: public:
ProcessorVisualOdometryTest(ParamsProcessorVisualOdometryPtr& _params_vo): ProcessorVisualOdometry_Wrapper(ParamsProcessorVisualOdometryPtr& _params_vo):
ProcessorVisualOdometry(_params_vo) ProcessorVisualOdometry(_params_vo)
{ {
...@@ -82,6 +89,12 @@ class ProcessorVisualOdometryTest : public ProcessorVisualOdometry ...@@ -82,6 +89,12 @@ class ProcessorVisualOdometryTest : public ProcessorVisualOdometry
} }
}; };
// namespace wolf{
// // Register in the Factories
// WOLF_REGISTER_PROCESSOR(ProcessorVisualOdometry_Wrapper);
// }
// ////////////////////////////////////////////////////////////////
TEST(ProcessorVisualOdometry, kltTrack) TEST(ProcessorVisualOdometry, kltTrack)
{ {
cv::Mat img = cv::imread(wolf_vision_root + "/test/markers.jpg", cv::IMREAD_GRAYSCALE); cv::Mat img = cv::imread(wolf_vision_root + "/test/markers.jpg", cv::IMREAD_GRAYSCALE);
...@@ -109,7 +122,7 @@ TEST(ProcessorVisualOdometry, kltTrack) ...@@ -109,7 +122,7 @@ TEST(ProcessorVisualOdometry, kltTrack)
params->fast.threshold = 30; params->fast.threshold = 30;
params->fast.non_max_suppresion = true; params->fast.non_max_suppresion = true;
ProcessorVisualOdometryTest processor(params); ProcessorVisualOdometry_Wrapper processor(params);
cv::Ptr<cv::FeatureDetector> detector = processor.getDetector(); cv::Ptr<cv::FeatureDetector> detector = processor.getDetector();
std::vector<cv::KeyPoint> kps; std::vector<cv::KeyPoint> kps;
...@@ -151,7 +164,7 @@ TEST(ProcessorVisualOdometry, kltTrack) ...@@ -151,7 +164,7 @@ TEST(ProcessorVisualOdometry, kltTrack)
// params->min_features_for_keyframe = 50; // params->min_features_for_keyframe = 50;
// params->max_nb_tracks = 400; // params->max_nb_tracks = 400;
// params->min_track_length_for_landmark = 4; // params->min_track_length_for_landmark = 4;
// ProcessorVisualOdometryTest processor(params); // ProcessorVisualOdometry_Wrapper processor(params);
// //
// //
// // Install camera // // Install camera
...@@ -214,7 +227,7 @@ TEST(ProcessorVisualOdometry, kltTrack) ...@@ -214,7 +227,7 @@ TEST(ProcessorVisualOdometry, kltTrack)
// static_cast<float>(capture_image_incoming->getKeyPoints().size()); // static_cast<float>(capture_image_incoming->getKeyPoints().size());
// ASSERT_TRUE(track_ratio > 0.8); // ASSERT_TRUE(track_ratio > 0.8);
// //
// // Check if tracks_prev and tracks_origin are similar // // Check if tracks_c1 and tracks_origin are similar
// ASSERT_EQ(capture_image_incoming->getTracksPrev().size(), capture_image_incoming->getTracksOrigin().size()); // ASSERT_EQ(capture_image_incoming->getTracksPrev().size(), capture_image_incoming->getTracksOrigin().size());
// //
//} //}
...@@ -265,7 +278,7 @@ TEST(ProcessorVisualOdometry, kltTrack) ...@@ -265,7 +278,7 @@ TEST(ProcessorVisualOdometry, kltTrack)
// capture_image_1->process(); // capture_image_1->process();
// //
// ASSERT_EQ(proc_vo_ptr->getTrackMatrix().numTracks(),capture_image_1->getTracksPrev().size()); // ASSERT_EQ(proc_vo_ptr->getTrackMatrix().numTracks(),capture_image_1->getTracksPrev().size());
// // Check if tracks_prev and tracks_origin are similar // // Check if tracks_c1 and tracks_origin are similar
// ASSERT_EQ(capture_image_1->getTracksPrev().size(), capture_image_1->getTracksOrigin().size()); // ASSERT_EQ(capture_image_1->getTracksPrev().size(), capture_image_1->getTracksOrigin().size());
// //
// // ---------------------------------------- // // ----------------------------------------
...@@ -320,100 +333,226 @@ TEST(ProcessorVisualOdometry, mergeTracks) ...@@ -320,100 +333,226 @@ TEST(ProcessorVisualOdometry, mergeTracks)
} }
cv::Point2f project(Eigen::Matrix3f K, Eigen::Vector3f p){
Eigen::Vector3f ph = K * p;
ph = ph / ph(2,0);
return cv::Point2f(ph(0), ph(1));
////////////////////////////////////////////////////////
////////////////////////////////////////////////////////
// Now we deal with multiview geometry so it helps to
// create a map of landmarks to project them
////////////////////////////////////////////////////////
////////////////////////////////////////////////////////
class ProcessorVOMultiView_class : public testing::Test{
public:
KeyPointsMap mwkps_c1, mwkps_c2;
TracksMap tracks_c1_c2;
ProcessorVisualOdometry_WrapperPtr processor;
Eigen::Vector3d t_21;
Eigen::Matrix3d R_21;
Eigen::Vector4d k;
cv::Mat Kcv;
void SetUp() override
{
k << 376, 240, 460, 460;
Kcv = (cv::Mat_<double>(3,3) << k(2), 0, k(0),
0, k(3), k(1),
0, 0, 1);
// Create a processor
ParamsProcessorVisualOdometryPtr params = std::make_shared<ParamsProcessorVisualOdometry>();
params->grid.margin = 10;
params->grid.nbr_cells_h = 8;
params->grid.nbr_cells_v = 8;
params->grid.separation = 10;
params->ransac.prob = 0.999; // 0.99 makes the gtest fail -> missing one point
params->ransac.thresh = 1.0;
processor = std::make_shared<ProcessorVisualOdometry_Wrapper>(params);
// Install camera
ParamsSensorCameraPtr intr = std::make_shared<ParamsSensorCamera>();
intr->pinhole_model_raw = k; // Necessary so that the internal Kcv camera matrix is configured properly
intr->distortion = Eigen::Vector4d::Zero();
intr->width = 752;
intr->height = 480;
SensorCameraPtr cam_ptr = std::make_shared<SensorCamera>((Eigen::Vector7d() << 0,0,0, 0,0,0,1).finished(), intr);
processor->configure(cam_ptr);
// Set 3D points on the previous camera frame -> all in front of the camera is better
Eigen::Vector3d X1_c1(1.0, 1.0, 2.0);
Eigen::Vector3d X2_c1(-1.0, -1.0, 2.0);
Eigen::Vector3d X3_c1(-0.75, -0.75, 3.0);
Eigen::Vector3d X4_c1(-0.75, 0.75, 2.5);
Eigen::Vector3d X5_c1(0.75, -0.75, 2.0);
Eigen::Vector3d X6_c1(0.0, 1.0, 2.0);
Eigen::Vector3d X7_c1(0.1, -1.0, 3.0);
Eigen::Vector3d X8_c1(0.75, 0.75, 2.0);
// Project pixels in the previous view
mwkps_c1.insert(std::pair<size_t, WKeyPoint>(0, WKeyPoint(pinhole::projectPoint(k, intr->distortion, X1_c1))));
mwkps_c1.insert(std::pair<size_t, WKeyPoint>(1, WKeyPoint(pinhole::projectPoint(k, intr->distortion, X2_c1))));
mwkps_c1.insert(std::pair<size_t, WKeyPoint>(2, WKeyPoint(pinhole::projectPoint(k, intr->distortion, X3_c1))));
mwkps_c1.insert(std::pair<size_t, WKeyPoint>(3, WKeyPoint(pinhole::projectPoint(k, intr->distortion, X4_c1))));
mwkps_c1.insert(std::pair<size_t, WKeyPoint>(4, WKeyPoint(pinhole::projectPoint(k, intr->distortion, X5_c1))));
mwkps_c1.insert(std::pair<size_t, WKeyPoint>(5, WKeyPoint(pinhole::projectPoint(k, intr->distortion, X6_c1))));
mwkps_c1.insert(std::pair<size_t, WKeyPoint>(6, WKeyPoint(pinhole::projectPoint(k, intr->distortion, X7_c1))));
mwkps_c1.insert(std::pair<size_t, WKeyPoint>(7, WKeyPoint(pinhole::projectPoint(k, intr->distortion, X8_c1))));
// Set the transformation between views 1 and 2
t_21 = Vector3d(0.1, 0.1, 0.0);
// Eigen::Vector3d euler(0.0, 0.0, 0.0); // degenerate case!
Eigen::Vector3d euler(0.2, 0.1, 0.3);
R_21 = e2R(euler);
mwkps_c2.insert(std::pair<size_t, WKeyPoint>(0, WKeyPoint(pinhole::projectPoint(k, intr->distortion, R_21*X1_c1 + t_21))));
mwkps_c2.insert(std::pair<size_t, WKeyPoint>(1, WKeyPoint(pinhole::projectPoint(k, intr->distortion, R_21*X2_c1 + t_21))));
mwkps_c2.insert(std::pair<size_t, WKeyPoint>(2, WKeyPoint(pinhole::projectPoint(k, intr->distortion, R_21*X3_c1 + t_21))));
mwkps_c2.insert(std::pair<size_t, WKeyPoint>(3, WKeyPoint(pinhole::projectPoint(k, intr->distortion, R_21*X4_c1 + t_21))));
mwkps_c2.insert(std::pair<size_t, WKeyPoint>(4, WKeyPoint(pinhole::projectPoint(k, intr->distortion, R_21*X5_c1 + t_21))));
mwkps_c2.insert(std::pair<size_t, WKeyPoint>(5, WKeyPoint(pinhole::projectPoint(k, intr->distortion, R_21*X6_c1 + t_21))));
mwkps_c2.insert(std::pair<size_t, WKeyPoint>(6, WKeyPoint(pinhole::projectPoint(k, intr->distortion, R_21*X7_c1 + t_21))));
mwkps_c2.insert(std::pair<size_t, WKeyPoint>(7, WKeyPoint(pinhole::projectPoint(k, intr->distortion, R_21*X8_c1 + t_21))));
// Build the tracksMap
for (size_t i=0; i<mwkps_c2.size(); ++i)
{
tracks_c1_c2.insert(std::pair<size_t, size_t>(i,i));
}
}
};
TEST_F(ProcessorVOMultiView_class, filterWithEssential)
{
cv::Mat E;
// Let's try FilterWithEssential, all points here are inliers
VectorComposite toto; // empty VectorComposite -> does not recoverPose
bool success = processor->filterWithEssential(mwkps_c1, mwkps_c2, tracks_c1_c2, E, toto);
ASSERT_TRUE(success);
ASSERT_EQ(tracks_c1_c2.size(), mwkps_c2.size());
////////////////////////////////////////////////////////////////////
    // We add here an outlier: a keyPoint that doesn't move
mwkps_c1.insert(std::pair<size_t, WKeyPoint>(8, WKeyPoint(cv::KeyPoint(cv::Point2d(120,140), 1))));
mwkps_c2.insert(std::pair<size_t, WKeyPoint>(8, WKeyPoint(cv::KeyPoint(cv::Point2d(120,140), 1))));
tracks_c1_c2.insert(std::pair<size_t, size_t>(8,8));
// point at 8 must be discarded
success = processor->filterWithEssential(mwkps_c1, mwkps_c2, tracks_c1_c2, E, toto);
ASSERT_TRUE(success);
ASSERT_EQ(tracks_c1_c2.count(8), 0);
} }
TEST(ProcessorVisualOdometry, filterWithEssential)
TEST_F(ProcessorVOMultiView_class, recoverPoseOpenCV)
{ {
KeyPointsMap mwkps_prev, mwkps_curr;
TracksMap tracks_prev_curr; // Check that the computed essential matrix corresponds to the camera movement
// -> recover the orientation and compare to groundtruth
cv::Mat E; cv::Mat E;
// Create a simple camera model // Compute essential matrix, all points here are inliers
Eigen::Matrix3f K; VectorComposite titi;
K << 100, 0, 240, processor->filterWithEssential(mwkps_c1, mwkps_c2, tracks_c1_c2, E, titi);
0, 100, 240,
0, 0, 1;
// Create a processor
ParamsProcessorVisualOdometryPtr params = std::make_shared<ParamsProcessorVisualOdometry>();
params->grid.margin = 10;
params->grid.nbr_cells_h = 8;
params->grid.nbr_cells_v = 8;
params->grid.separation = 10;
params->ransac.prob = 0.999; // 0.99 makes the gtest fail -> missing one point
params->ransac.thresh = 1.0;
ProcessorVisualOdometryTest processor(params);
// Install camera
ParamsSensorCameraPtr intr = std::make_shared<ParamsSensorCamera>();
intr->pinhole_model_raw = Eigen::Vector4d(100, 100, 240, 240);
intr->width = 480;
intr->height = 480;
SensorCameraPtr cam_ptr = std::make_shared<SensorCamera>((Eigen::Vector7d() << 0,0,0, 0,0,0,1).finished(), intr);
processor.configure(cam_ptr);
// Set 3D points on the previous view
Eigen::Vector3f X1_prev(1.0, 1.0, 2.0);
Eigen::Vector3f X2_prev(-1.0, -1.0, 2.0);
Eigen::Vector3f X3_prev(-0.75, -0.75, 3.0);
Eigen::Vector3f X4_prev(-0.75, 0.75, 2.5);
Eigen::Vector3f X5_prev(0.75, -0.75, 2.0);
Eigen::Vector3f X6_prev(0.0, 1.0, 2.0);
Eigen::Vector3f X7_prev(0.1, -1.0, 3.0);
Eigen::Vector3f X8_prev(0.75, 0.75, 2.0);
// Project pixels in the previous view
mwkps_prev.insert(std::pair<size_t, WKeyPoint>(0, WKeyPoint(cv::KeyPoint(project(K,X1_prev), 1))));
mwkps_prev.insert(std::pair<size_t, WKeyPoint>(1, WKeyPoint(cv::KeyPoint(project(K,X2_prev), 1))));
mwkps_prev.insert(std::pair<size_t, WKeyPoint>(2, WKeyPoint(cv::KeyPoint(project(K,X3_prev), 1))));
mwkps_prev.insert(std::pair<size_t, WKeyPoint>(3, WKeyPoint(cv::KeyPoint(project(K,X4_prev), 1))));
mwkps_prev.insert(std::pair<size_t, WKeyPoint>(4, WKeyPoint(cv::KeyPoint(project(K,X5_prev), 1))));
mwkps_prev.insert(std::pair<size_t, WKeyPoint>(5, WKeyPoint(cv::KeyPoint(project(K,X6_prev), 1))));
mwkps_prev.insert(std::pair<size_t, WKeyPoint>(6, WKeyPoint(cv::KeyPoint(project(K,X7_prev), 1))));
mwkps_prev.insert(std::pair<size_t, WKeyPoint>(7, WKeyPoint(cv::KeyPoint(project(K,X8_prev), 1))));
// Set the transformation between the two views
Eigen::Vector3f t(0.1, 0.1, 0.0);
// Eigen::Vector3f euler(0.0, 0.0, 0.0); // degenerate case!
Eigen::Vector3f euler(0.2, 0.1, 0.3);
Eigen::Matrix3f R = e2R(euler);
// Project pixels in the current view ////////////////////////////////////////////////////////////////////
mwkps_curr.insert(std::pair<size_t, WKeyPoint>(0, WKeyPoint(cv::KeyPoint(project(K,R*X1_prev + t), 1)))); // can we retrieve the right orientation from the essential matrix?
mwkps_curr.insert(std::pair<size_t, WKeyPoint>(1, WKeyPoint(cv::KeyPoint(project(K,R*X2_prev + t), 1)))); std::vector<cv::Point2d> pts_c1, pts_c2;
mwkps_curr.insert(std::pair<size_t, WKeyPoint>(2, WKeyPoint(cv::KeyPoint(project(K,R*X3_prev + t), 1)))); for (int i=0; i < mwkps_c1.size(); i++){
mwkps_curr.insert(std::pair<size_t, WKeyPoint>(3, WKeyPoint(cv::KeyPoint(project(K,R*X4_prev + t), 1)))); pts_c1.push_back(mwkps_c1.at(i).getCvKeyPoint().pt);
mwkps_curr.insert(std::pair<size_t, WKeyPoint>(4, WKeyPoint(cv::KeyPoint(project(K,R*X5_prev + t), 1)))); pts_c2.push_back(mwkps_c2.at(i).getCvKeyPoint().pt);
mwkps_curr.insert(std::pair<size_t, WKeyPoint>(5, WKeyPoint(cv::KeyPoint(project(K,R*X6_prev + t), 1))));
mwkps_curr.insert(std::pair<size_t, WKeyPoint>(6, WKeyPoint(cv::KeyPoint(project(K,R*X7_prev + t), 1))));
mwkps_curr.insert(std::pair<size_t, WKeyPoint>(7, WKeyPoint(cv::KeyPoint(project(K,R*X8_prev + t), 1))));
// Build the tracksMap
for (size_t i=0; i<mwkps_curr.size(); ++i)
{
tracks_prev_curr.insert(std::pair<size_t, size_t>(i,i));
} }
// Let's try FilterWithEssential, all points here are inliers
processor.filterWithEssential(mwkps_prev, mwkps_curr, tracks_prev_curr, E);
ASSERT_EQ(tracks_prev_curr.size(), mwkps_curr.size());
// We had here an outlier: a keyPoint that doesn't move cv::Mat R_out, t_out;
mwkps_prev.insert(std::pair<size_t, WKeyPoint>(8, WKeyPoint(cv::KeyPoint(cv::Point2d(120,140), 1)))); cv::Mat mask;
mwkps_curr.insert(std::pair<size_t, WKeyPoint>(8, WKeyPoint(cv::KeyPoint(cv::Point2d(120,140), 1)))); cv::recoverPose(E, pts_c1, pts_c2, Kcv, R_out, t_out, mask);
tracks_prev_curr.insert(std::pair<size_t, size_t>(8,8));
// point at 8 must be discarded Eigen::Matrix3d R_out_eig, R_21_eig;
processor.filterWithEssential(mwkps_prev, mwkps_curr, tracks_prev_curr, E); cv::cv2eigen(R_out, R_out_eig);
ASSERT_EQ(tracks_prev_curr.count(8), 0); ASSERT_MATRIX_APPROX(R_21, R_out_eig, 1e-4);
// Same with helper function
VectorComposite pose_21_out = ProcessorVisualOdometry::pose_from_essential_matrix(E, pts_c1, pts_c2, Kcv, mask);
//////////////////////////////////////////////////////
// Can we also use it to detect outliers using cheirality check?
// Does not seem so...
// Maybe the outliers are not rightly chosen for this test:
// inside recoverPose, triangulatePoints is called and then there are
// checks on depth coordinate of the triangulated point in both camera frames:
// if depth is negative or depth lower than a threshold, point considered as an "outlier"
// Can simply mean an absence of movement, hard to tune threshold...
// add outliers
pts_c1.push_back(cv::Point2d(165.0, 190.0));
pts_c2.push_back(cv::Point2d(200.0, 190.0));
pts_c1.push_back(cv::Point2d(100.0, 250.0));
pts_c2.push_back(cv::Point2d(100.0, 250.0));
pts_c1.push_back(cv::Point2d(400.0, 70.0));
pts_c2.push_back(cv::Point2d(400.0, 70.0));
pts_c1.push_back(cv::Point2d(300.0, 300.0));
pts_c2.push_back(cv::Point2d(100.0, 300.0));
mask = 255*cv::Mat::ones(12, 1, CV_64F);
cv::Mat triangulated_pts;
double distance_threshold = 80.0;
// cv::recoverPose(E, pts_c1, pts_c2, Kcv, R_out, t_out, mask);
cv::Mat new_mask;
cv::recoverPose(E, pts_c1, pts_c2, Kcv, R_out, t_out, distance_threshold, new_mask, triangulated_pts);
// triangulated_pts.size()
// std::cout << triangulated_pts.size() << std::endl;
// std::cout << "mask" << std::endl;
// std::cout << mask << std::endl;
// std::cout << "R_out" << std::endl;
// std::cout << R_out << std::endl;
// std::cout << "t_out" << std::endl;
// std::cout << t_out << std::endl;
} }
// // SEGFAULT!!!!!!!!
// TEST_F(ProcessorVOMultiView_class, tryTriangulationFromFeatures)
// {
// ProblemPtr problem = Problem::create("PO", 3);
// VectorComposite state1("P0");
// state1['P'] = Vector3d::Zero();
// state1['O'] = Quaterniond::Identity().coeffs();
// FrameBasePtr KF1 = FrameBase::emplace<FrameBase>(problem->getTrajectory(), 0.0, "PO", state1);
// CaptureBasePtr c1 = CaptureBase::emplace<CaptureImage>(KF1, 0.0, nullptr, cv::Mat());
// FeatureBasePtr f1 = FeatureBase::emplace<FeaturePointImage>(c1, mwkps_c1.at(0), Matrix2d::Identity());
// VectorComposite state2("P0");
// state2['P'] = Vector3d::Zero();
// state2['O'] = Quaterniond::Identity().coeffs();
// FrameBasePtr KF2 = FrameBase::emplace<FrameBase>(problem->getTrajectory(), 1.0, "PO", state1);
// CaptureBasePtr c2 = CaptureBase::emplace<CaptureImage>(KF2, 1.0, nullptr, cv::Mat());
// FeatureBasePtr f2 = FeatureBase::emplace<FeaturePointImage>(c2, mwkps_c2.at(0), Matrix2d::Identity());
// Track track;
// track.insert(std::pair<TimeStamp, FeatureBasePtr>(0.0, f1));
// track.insert(std::pair<TimeStamp, FeatureBasePtr>(1.0, f2));
// Vector4d pt_c;
// auto f2_pi = std::static_pointer_cast<FeaturePointImage>(f2);
// processor->tryTriangulationFromFeatures(f2_pi, track, pt_c);
// WOLF_INFO(pt_c);
// }
int main(int argc, char **argv) int main(int argc, char **argv)
{ {
testing::InitGoogleTest(&argc, argv); testing::InitGoogleTest(&argc, argv);
......
...@@ -52,5 +52,9 @@ max_nb_tracks: 30 ...@@ -52,5 +52,9 @@ max_nb_tracks: 30
# standard deviation of the pixel reprojection factor # standard deviation of the pixel reprojection factor
std_pix: 1 std_pix: 1
# before creating a landmark, wait until the track is old enough # Rules to create a new landmark from a track
min_track_length_for_landmark: 20 lmk_creation:
# wait until the track is old enough
min_track_length_for_landmark: 20
# enough pixel distance
min_pixel_dist: 0.0