Compare revisions: mobile_robotics/wolf_projects/wolf_lib/plugins/vision (25 commits on source)
Changes are shown as if the source revision was being merged into the target revision.
......@@ -101,13 +101,9 @@ IF(EXISTS "${WOLF_CONFIG_DIR}" AND NOT IS_DIRECTORY "${WOLF_CONFIG_DIR}")
message(FATAL_ERROR "Bug: Specified CONFIG_DIR: "
"${WOLF_CONFIG_DIR} exists, but is not a directory.")
ENDIF()
# Configure config.h
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/internal/config.h.in "${WOLF_CONFIG_DIR}/config.h")
message("CONFIG DIRECTORY ${PROJECT_BINARY_DIR}")
include_directories("${PROJECT_BINARY_DIR}/conf")
# ============ INCLUDES ==================
INCLUDE_DIRECTORIES(BEFORE "include")
# ============ HEADERS ============
SET(HDRS_CAPTURE
......@@ -239,9 +235,9 @@ install(
${LIB_INSTALL_DIR}/${PLUGIN_NAME}/cmake
)
# Specifies include directories to use when compiling the plugin target
# This way, include_directories does not need to be called in plugins depending on this one
target_include_directories(${PLUGIN_NAME} INTERFACE
target_include_directories(${PLUGIN_NAME} PUBLIC
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
$<BUILD_INTERFACE:${PROJECT_BINARY_DIR}/conf>
$<INSTALL_INTERFACE:${INCLUDE_INSTALL_DIR}>
)
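# A minimal consumer-side sketch (hypothetical downstream target "my_plugin"):
# with the PUBLIC usage requirements above, linking against the target is
# enough to inherit its include directories, e.g.
#   add_library(my_plugin SHARED src/my_plugin.cpp)
#   target_link_libraries(my_plugin PUBLIC ${PLUGIN_NAME})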
......
......@@ -67,5 +67,9 @@ max_nb_tracks: 100
# standard deviation of the pixel reprojection factor
std_pix: 1
# before creating a landmark, wait until the track is old enough
min_track_length_for_landmark: 3
# Rules to create a new landmark from a track
lmk_creation:
# wait until the track is old enough
min_track_length_for_landmark: 3
# enough pixel distance
min_pixel_dist: 0.0
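# Illustrative example (hypothetical values): a landmark is created only when a
# track spans at least min_track_length_for_landmark features AND its first and
# last detections are more than min_pixel_dist pixels apart, e.g.
# lmk_creation:
#   min_track_length_for_landmark: 3
#   min_pixel_dist: 20.0   # 0.0 effectively disables the parallax check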
......@@ -53,6 +53,7 @@ class WKeyPoint
WKeyPoint();
WKeyPoint(const cv::KeyPoint& _cv_kp);
WKeyPoint(const Eigen::Vector2d& _eig_kp);
size_t getId() const {return id_;}
void setId(size_t _id) {id_ = _id;}
......@@ -75,7 +76,7 @@ class WKeyPoint
// ID in a frame
typedef std::unordered_map<size_t, WKeyPoint> KeyPointsMap;
// This maps the IDs of the Wolf KeyPoints that are tracked from a frame to the other.
// Map the IDs of the Wolf KeyPoints that are tracked from a frame to the other.
// It takes the ID of a WKeyPoint and returns the ID of its track in another Frame.
typedef std::unordered_map<size_t, size_t> TracksMap;
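// A minimal usage sketch (hypothetical ids):
//   TracksMap tracks_last_inco;   // keypoint id in last -> keypoint id in incoming
//   tracks_last_inco[3] = 7;      // WKeyPoint 3 in last is tracked as WKeyPoint 7 in incoming
//   if (tracks_last_inco.count(3)) {
//       size_t id_inco = tracks_last_inco.at(3);   // == 7
//   }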
......
......@@ -32,7 +32,7 @@
*
*/
#include "core/common/wolf.h"
#include <core/common/wolf.h>
#include <iostream>
......
......@@ -25,6 +25,7 @@
// wolf plugin includes
#include "vision/internal/config.h"
#include "vision/math/pinhole_tools.h"
#include "vision/sensor/sensor_camera.h"
#include "vision/capture/capture_image.h"
......@@ -35,23 +36,37 @@
// wolf includes
#include <core/math/rotations.h>
#include <core/math/SE3.h>
#include <core/state_block/state_composite.h>
#include <core/processor/processor_tracker.h>
#include <core/processor/track_matrix.h>
#include <core/processor/motion_provider.h>
// Opencv includes
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/features2d.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/video/tracking.hpp>
#include <opencv2/core/eigen.hpp>
namespace wolf{
/** \brief Buffer of VectorComposite
*
* Object and functions to manage a buffer of VectorComposite objects.
* Used to hold a memory of origin->last relative poses.
*/
class BufferVectorCompositePtr : public Buffer<std::shared_ptr<VectorComposite>> { };
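// Minimal usage sketch, based on the Buffer calls made in this processor
// (timestamps and tolerance are illustrative):
//   BufferVectorCompositePtr buffer;
//   buffer.emplace(t_origin, std::make_shared<VectorComposite>(pose_ol));
//   auto pose_ptr = buffer.selectFirstBefore(t_query, time_tolerance);
//   if (pose_ptr) { /* *pose_ptr is the origin->last camera pose */ }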
WOLF_STRUCT_PTR_TYPEDEFS(ParamsProcessorVisualOdometry);
struct ParamsProcessorVisualOdometry : public ParamsProcessorTracker
struct ParamsProcessorVisualOdometry : public ParamsProcessorTracker, public ParamsMotionProvider
{
struct RansacParams
{
......@@ -97,17 +112,24 @@ struct ParamsProcessorVisualOdometry : public ParamsProcessorTracker
} clahe;
};
struct LandmarkCreationParams
{
unsigned int min_track_length_for_landmark;
double min_pixel_dist;
};
RansacParams ransac;
KltParams klt;
FastParams fast;
GridParams grid;
EqualizationParams equalization;
LandmarkCreationParams lmk_creation;
double std_pix;
unsigned int min_track_length_for_landmark;
ParamsProcessorVisualOdometry() = default;
ParamsProcessorVisualOdometry(std::string _unique_name, const ParamsServer& _server):
ParamsProcessorTracker(_unique_name, _server)
ParamsProcessorTracker(_unique_name, _server),
ParamsMotionProvider(_unique_name, _server)
{
std_pix = _server.getParam<double>(prefix + _unique_name + "/std_pix");
......@@ -146,7 +168,9 @@ struct ParamsProcessorVisualOdometry : public ParamsProcessorTracker
grid.margin = _server.getParam<unsigned int>(prefix + _unique_name + "/grid/margin");
grid.separation = _server.getParam<unsigned int>(prefix + _unique_name + "/grid/separation");
min_track_length_for_landmark = _server.getParam<unsigned int>(prefix + _unique_name + "/min_track_length_for_landmark");
lmk_creation.min_track_length_for_landmark = _server.getParam<unsigned int>(prefix + _unique_name + "/lmk_creation/min_track_length_for_landmark");
lmk_creation.min_pixel_dist = _server.getParam<double>(prefix + _unique_name + "/lmk_creation/min_pixel_dist");
}
std::string print() const override
......@@ -165,18 +189,25 @@ struct ParamsProcessorVisualOdometry : public ParamsProcessorTracker
+ "grid.nbr_cells_v: " + std::to_string(grid.nbr_cells_v) + "\n"
+ "grid.margin: " + std::to_string(grid.margin) + "\n"
+ "grid.separation: " + std::to_string(grid.separation) + "\n"
+ "min_track_length_for_landmark: " + std::to_string(min_track_length_for_landmark) + "\n";
+ "lmk_creation.min_track_length_for_landmark: " + std::to_string(lmk_creation.min_track_length_for_landmark) + "\n"
+ "lmk_creation.min_pixel_dist: " + std::to_string(lmk_creation.min_pixel_dist) + "\n";
}
};
WOLF_PTR_TYPEDEFS(ProcessorVisualOdometry);
class ProcessorVisualOdometry : public ProcessorTracker
class ProcessorVisualOdometry : public ProcessorTracker, public MotionProvider
{
public:
ProcessorVisualOdometry(ParamsProcessorVisualOdometryPtr _params_visual_odometry);
virtual ~ProcessorVisualOdometry() override {};
// MotionProvider class pure virtual methods
VectorComposite getState(const StateStructure& _structure = "") const override;
TimeStamp getTimeStamp( ) const override;
VectorComposite getState(const TimeStamp& _ts, const StateStructure& _structure = "") const override;
VectorComposite getRelativePoseOriginLast() const;
WOLF_PROCESSOR_CREATE(ProcessorVisualOdometry, ParamsProcessorVisualOdometry);
protected:
......@@ -204,6 +235,9 @@ class ProcessorVisualOdometry : public ProcessorTracker
// bookeeping
TracksMap tracks_map_li_matched_;
// buffer of origin->last camera relative poses, keyed by the origin timestamp
BufferVectorCompositePtr buffer_pose_cam_ol_;
public:
......@@ -250,9 +284,32 @@ class ProcessorVisualOdometry : public ProcessorTracker
* \brief Emplace a landmark corresponding to a track and initialize it at an arbitrary depth on its ray.
* \param _feature_ptr a pointer to the feature used to create the new landmark
* \return a pointer to the created landmark
*
* Implementation: use the ray of the feature detection in the last frame and create the landmark at 1 meter (arbitrary depth)
*/
LandmarkBasePtr emplaceLandmarkNaive(FeaturePointImagePtr _feature_ptr);
/**
* \brief Emplace a landmark corresponding to a track and initialize it with triangulation.
* \param _feature_ptr a pointer to the feature used to create the new landmark
* \param _track_kf track with only features associated to keyframes that may be used for initialisation
* \return a pointer to the created landmark. If null, the triangulation failed due to low parallax
*/
LandmarkBasePtr emplaceLandmarkTriangulation(FeaturePointImagePtr _feature_ptr, Track _track_kf);
/**
* \brief Emplace a landmark corresponding to a track and initialize it with triangulation.
* \param _feature_ptr a pointer to the feature used to create the new landmark
* \param _track_kf track with only features associated to keyframes that may be used for initialisation
* \return a pointer to the created landmark
*
* Implementation: try to triangulate a new landmark based on previous frame estimates.
* Apply a numerical test to assess whether there is enough parallax.
*/
LandmarkBasePtr emplaceLandmark(FeatureBasePtr _feature_ptr);
bool tryTriangulationFromFeatures(FeaturePointImagePtr _feat, Track _track_kf, Eigen::Vector4d&);
Eigen::Isometry3d getTcw(TimeStamp _ts) const;
/** \brief Advance the incoming Capture to become the last.
*
......@@ -277,19 +334,60 @@ class ProcessorVisualOdometry : public ProcessorTracker
/** \brief Remove outliers from the tracks map with a RANSAC 5-point algorithm implemented in OpenCV
*/
bool filterWithEssential(const KeyPointsMap mwkps_prev, const KeyPointsMap mwkps_curr, TracksMap &tracks_prev_curr, cv::Mat &E);
/** \brief Tool to merge tracks
bool filterWithEssential(const KeyPointsMap mwkps_prev,
const KeyPointsMap mwkps_curr,
TracksMap &tracks_prev_curr,
cv::Mat &E,
VectorComposite &_pose_prev_curr);
/** \brief Merge track maps between moments prev->curr and curr->next to give a track map between prev->next.
*
* \param tracks_prev_curr prev->curr track map
* \param tracks_curr_next curr->next track map
* \return merged track prev->next
*/
static TracksMap mergeTracks(const TracksMap& tracks_prev_curr, const TracksMap& tracks_curr_next);
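// Illustrative example (hypothetical ids): with tracks_prev_curr = {1->4, 2->5}
// and tracks_curr_next = {4->9}, mergeTracks returns {1->9}; the track through
// keypoint 5 is dropped because it was not tracked into next.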
void setParams(const ParamsProcessorVisualOdometryPtr _params);
const TrackMatrix& getTrackMatrix() const {return track_matrix_;}
///////////////////////////////////////
// MotionProvider related methods
VectorComposite getStateFromRelativeOriginLast(VectorComposite co_pose_cl) const;
static VectorComposite pose_from_essential_matrix(const cv::Mat& _E,
const std::vector<cv::Point2d>& p2d_prev,
const std::vector<cv::Point2d>& p2d_curr,
const cv::Mat& _K,
cv::Mat& cvMask);
///////////////////////////////////////
/** \brief sequence of heuristics to decide if a track is worthy of becoming a landmark
*
*/
bool new_landmark_is_viable(int track_id);
private:
void retainBest(std::vector<cv::KeyPoint> &_keypoints, int n);
/* Equalize image for better detection and tracking
* available methods:
* 0. none
* 1. average
* 2. opencv: histogram_equalization
* 3. opencv: CLAHE
*/
void equalize_img(cv::Mat &img_incoming);
void detect_keypoints_empty_grid(cv::Mat img_incoming, CaptureImagePtr capture_image_incoming);
void filter_last_incoming_tracks_from_ransac_result(const TracksMap& tracks_last_incoming, const KeyPointsMap& mwkps_incoming, const TracksMap& tracks_origin_incoming,
TracksMap& tracks_last_incoming_filtered, KeyPointsMap& mwkps_incoming_filtered);
void detect_keypoints_in_empty_grid_cells(cv::Mat img_last, const TracksMap& tracks_last_incoming_filtered, const KeyPointsMap& mwkps_last,
std::vector<cv::KeyPoint>& kps_last_new, KeyPointsMap& mwkps_last_filtered);
};
} //namespace wolf
......
......@@ -39,7 +39,15 @@ WKeyPoint::WKeyPoint(const cv::KeyPoint& _cv_kp):
{
}
WKeyPoint::WKeyPoint(const Eigen::Vector2d& _eig_kp):
id_(++id_count_)
{
cv_kp_.pt = cv::Point2f(_eig_kp(0), _eig_kp(1));
}
/////////////////
CaptureImage::CaptureImage(const TimeStamp& _ts, SensorCameraPtr _camera_ptr, const cv::Mat& _img) :
CaptureBase("CaptureImage", _ts, _camera_ptr),
img_(_img),
......
......@@ -24,15 +24,18 @@
//standard
#include "vision/processor/processor_visual_odometry.h"
#include <opencv2/imgproc.hpp>
#include <opencv2/core/eigen.hpp>
#include <chrono>
#include <ctime>
namespace wolf{
using namespace SE3;
ProcessorVisualOdometry::ProcessorVisualOdometry(ParamsProcessorVisualOdometryPtr _params_vo) :
ProcessorTracker("ProcessorVisualOdometry", "PO", 3, _params_vo),
MotionProvider("PO", _params_vo),
params_visual_odometry_(_params_vo)
{
// Preprocessor stuff
......@@ -51,12 +54,9 @@ void ProcessorVisualOdometry::configure(SensorBasePtr _sensor)
{
//Initialize camera sensor pointer
sen_cam_ = std::static_pointer_cast<SensorCamera>(_sensor);
Eigen::Matrix3d K = sen_cam_->getIntrinsicMatrix();
Kcv_ = (cv::Mat_<float>(3,3) << K(0,0), 0, K(0,2),
0, K(1,1), K(1,2),
0, 0, 1);
// CV type for intrinsic matrix
cv::eigen2cv(sen_cam_->getIntrinsicMatrix(), Kcv_);
// Tessellation of the image
cell_grid_ = ActiveSearchGrid(sen_cam_->getImgWidth(), sen_cam_->getImgHeight(),
......@@ -66,16 +66,6 @@ void ProcessorVisualOdometry::configure(SensorBasePtr _sensor)
params_visual_odometry_->grid.separation);
}
TracksMap ProcessorVisualOdometry::mergeTracks(const TracksMap& tracks_prev_curr, const TracksMap& tracks_curr_next){
TracksMap tracks_prev_next;
for (auto &match : tracks_prev_curr){
if (tracks_curr_next.count(match.second)){
tracks_prev_next[match.first] = tracks_curr_next.at(match.second);
}
}
return tracks_prev_next;
}
void ProcessorVisualOdometry::preProcess()
{
......@@ -84,74 +74,15 @@ void ProcessorVisualOdometry::preProcess()
// Get Capture
capture_image_incoming_ = std::static_pointer_cast<CaptureImage>(incoming_ptr_);
cv::Mat img_incoming = capture_image_incoming_->getImage();
equalize_img(img_incoming);
/* Equalize image for better detection and tracking
* available methods:
* 0. none
* 1. average
* 2. opencv: histogram_equalization
* 3. opencv: CLAHE
*/
switch (params_visual_odometry_->equalization.method)
{
case 0:
break;
case 1:
{
// average to central brightness
auto img_avg = (cv::mean(img_incoming)).val[0];
img_incoming += cv::Scalar(round(params_visual_odometry_->equalization.average.median - img_avg) );
break;
}
case 2:
{
cv::equalizeHist( img_incoming, img_incoming );
break;
}
case 3:
{
// Contrast Limited Adaptive Histogram Equalization CLAHE
// -> more continuous lighting and higher contrast images
cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE(params_visual_odometry_->equalization.clahe.clip_limit,
params_visual_odometry_->equalization.clahe.tile_grid_size);
clahe->apply(img_incoming, img_incoming);
break;
}
}
// Pre-process the image here if necessary: greyscale if BGR, CLAHE etc...
// once preprocessing is done, replace the original image (?)
// if first image, compute keypoints, add to capture incoming and return
if (last_ptr_ == nullptr){
detect_keypoints_empty_grid(img_incoming, capture_image_incoming_);
// detect one FAST keypoint in each cell of the grid
cv::Rect rect_roi;
Eigen::Vector2i cell_index;
std::vector<cv::KeyPoint> kps_roi;
for (int i=1; i < params_visual_odometry_->grid.nbr_cells_h-1; i++){
for (int j=1; j < params_visual_odometry_->grid.nbr_cells_v-1; j++){
cell_index << i,j;
cell_grid_.cell2roi(cell_index, rect_roi);
cv::Mat img_roi(img_incoming, rect_roi); // no data copy -> no overhead
detector_->detect(img_roi, kps_roi);
if (kps_roi.size() > 0){
// retain only the best keypoint in each region of interest
retainBest(kps_roi, 1);
// Keypoints are detected in the local coordinates of the region of interest
// -> translate to the full image corner coordinate system
kps_roi.at(0).pt.x = kps_roi.at(0).pt.x + rect_roi.x;
kps_roi.at(0).pt.y = kps_roi.at(0).pt.y + rect_roi.y;
capture_image_incoming_->addKeyPoint(kps_roi.at(0));
}
}
}
WOLF_DEBUG( "Initially detected " , capture_image_incoming_->getKeyPoints().size(), " keypoints in incoming" );
// Initialize the tracks data structure with a "dummy track" where the keypoint is pointing to itself
......@@ -168,6 +99,7 @@ void ProcessorVisualOdometry::preProcess()
return;
}
////////////////////////////////
// FEATURE TRACKING
// Update capture Incoming data
......@@ -186,8 +118,6 @@ void ProcessorVisualOdometry::preProcess()
KeyPointsMap mwkps_last = capture_image_last_ ->getKeyPoints();
KeyPointsMap mwkps_incoming; // init incoming
WOLF_DEBUG( "Tracking " , mwkps_last.size(), " keypoints in last" );
// TracksMap between last and incoming
......@@ -204,21 +134,25 @@ void ProcessorVisualOdometry::preProcess()
// Outliers rejection with essential matrix
cv::Mat E;
filterWithEssential(mwkps_origin, mwkps_incoming, tracks_origin_incoming, E);
VectorComposite pose_ol("PO", {3,4});
int __attribute__((unused)) track_nb_before = tracks_origin_incoming.size();
bool success = filterWithEssential(mwkps_origin, mwkps_incoming, tracks_origin_incoming, E, pose_ol);
WOLF_DEBUG("After RANSAC: ", track_nb_before, "->", tracks_origin_incoming.size())
if (success){
// store result of recoverPose if RANSAC was handled well
buffer_pose_cam_ol_.emplace(origin_ptr_->getTimeStamp(), std::make_shared<VectorComposite>(pose_ol));
}
else{
WOLF_WARN("!!!!!!Essential matrix RANSAC failed!!!!!!!!");
// return; // What should we do in this case?
}
// Edit tracks prev with only inliers wrt origin
// and remove also from mwkps_incoming all the keypoints that have not been tracked
TracksMap tracks_last_incoming_filtered;
KeyPointsMap mwkps_incoming_filtered;
for (auto & track_origin_incoming : tracks_origin_incoming){
for (auto & track_last_incoming : tracks_last_incoming){
if (track_origin_incoming.second == track_last_incoming.second){
tracks_last_incoming_filtered[track_last_incoming.first] = track_last_incoming.second;
mwkps_incoming_filtered[track_last_incoming.second] = mwkps_incoming.at(track_last_incoming.second);
continue;
}
}
}
filter_last_incoming_tracks_from_ransac_result(tracks_last_incoming, mwkps_incoming, tracks_origin_incoming,
tracks_last_incoming_filtered, mwkps_incoming_filtered);
WOLF_DEBUG( "Tracked " , mwkps_incoming_filtered.size(), " inliers in incoming" );
......@@ -231,54 +165,11 @@ void ProcessorVisualOdometry::preProcess()
if (n_tracks_origin < params_visual_odometry_->min_features_for_keyframe)
{
// Erase all keypoints previously added to the cell grid
cell_grid_.renew();
// Add last Keypoints that still form valid tracks between last and incoming
// And create a filtered map for last keypoints
KeyPointsMap mwkps_last_filtered;
for (auto track: tracks_last_incoming_filtered){
mwkps_last_filtered[track.first] = mwkps_last[track.first];
size_t last_kp_id = track.first;
cell_grid_.hitCell(capture_image_last_->getKeyPoints().at(last_kp_id).getCvKeyPoint());
}
// Detect new KeyPoints
std::vector<cv::KeyPoint> kps_last_new;
// Use the grid to detect new keypoints in empty cells
// We try a bunch of times to add keypoints to randomly selected empty regions of interest
for (int i=0; i < params_visual_odometry_->max_new_features; i++){
cv::Rect rect_roi;
bool is_empty = cell_grid_.pickRoi(rect_roi);
if (!is_empty) // no empty cells!
{
break;
}
cv::Mat img_roi(img_last, rect_roi); // no data copy -> no overhead
std::vector<cv::KeyPoint> kps_roi;
detector_->detect(img_roi, kps_roi);
if (kps_roi.size() > 0){
// retain only the best keypoint in each region of interest
retainBest(kps_roi, 1);
// Keypoints are detected in the local coordinates of the region of interest
// -> translate to the full image corner coordinate system
kps_roi.at(0).pt.x = kps_roi.at(0).pt.x + rect_roi.x;
kps_roi.at(0).pt.y = kps_roi.at(0).pt.y + rect_roi.y;
kps_last_new.push_back(kps_roi.at(0));
// update grid with this detection
cell_grid_.hitCell(kps_roi.at(0));
}
else
{
// block this grid's cell so that it is not reused for detection
cell_grid_.blockCell(rect_roi);
}
}
KeyPointsMap mwkps_last_filtered;
detect_keypoints_in_empty_grid_cells(img_last, tracks_last_incoming_filtered, mwkps_last,
kps_last_new, mwkps_last_filtered);
// Create a map of wolf KeyPoints to track only the new ones
KeyPointsMap mwkps_last_new, mwkps_incoming_new;
......@@ -301,7 +192,8 @@ void ProcessorVisualOdometry::preProcess()
// Outliers rejection with essential matrix
// tracks that are not geometrically consistent are removed from tracks_last_incoming_new
filterWithEssential(mwkps_last_filtered, mwkps_incoming_filtered, tracks_last_incoming_filtered, E);
VectorComposite not_used;
filterWithEssential(mwkps_last_filtered, mwkps_incoming_filtered, tracks_last_incoming_filtered, E, not_used);
WOLF_DEBUG("New total : ", n_tracks_origin, " + ", mwkps_incoming_new.size(), " = ", tracks_last_incoming_filtered.size(), " tracks");
......@@ -325,9 +217,20 @@ void ProcessorVisualOdometry::preProcess()
}
TracksMap ProcessorVisualOdometry::mergeTracks(const TracksMap& tracks_prev_curr, const TracksMap& tracks_curr_next){
// merge tracks prev->curr and curr->next to get prev->next
TracksMap tracks_prev_next;
for (auto &match : tracks_prev_curr){
if (tracks_curr_next.count(match.second)){
tracks_prev_next[match.first] = tracks_curr_next.at(match.second);
}
}
return tracks_prev_next;
}
unsigned int ProcessorVisualOdometry::processKnown()
{
auto t1 = std::chrono::system_clock::now();
// Extend the process track matrix by using information stored in the incoming capture
......@@ -341,16 +244,18 @@ unsigned int ProcessorVisualOdometry::processKnown()
size_t id_feat_last = feat_pi_last->getKeyPoint().getId();
// if this feature id is in the last->incoming tracks of capture incoming, the track is continued
// otherwise we store the pair as a newly detected track (for processNew)
TracksMap tracks_map_li = capture_image_incoming_->getTracksPrev();
// the matched pairs are stored in tracks_map_li_matched_, which is used in processNew to select the points that have NOT been matched as "new"
if (tracks_map_li.count(id_feat_last)){
// WOLF_DEBUG("A corresponding track has been found for id_feat_last ", id_feat_last );
auto kp_track_li = tracks_map_li.find(id_feat_last);
// create a feature using the corresponding WKeyPoint contained in incoming (hence the "second")
auto feat_inco = FeatureBase::emplace<FeaturePointImage>(
capture_image_incoming_,
capture_image_incoming_->getKeyPoints().at(kp_track_li->second),
pixel_cov_);
// the landmark id of the incoming feature is the same as the last one: 0 if not associated yet, the true landmark id if already associated
feat_inco->setLandmarkId(feat_pi_last->landmarkId());
// extend the track
track_matrix_.add(feat_pi_last->trackId(), feat_inco);
// add tracks_map_li to a vector so that
......@@ -377,23 +282,21 @@ unsigned int ProcessorVisualOdometry::processNew(const int& _max_features)
// So we need to reset the origin tracks of incoming used in preProcess so that they correspond to the future origin (currently last)
capture_image_incoming_->setTracksOrigin(capture_image_incoming_->getTracksPrev());
// We have matched the tracks in the track matrix with the last->incoming tracks
// stored in the TracksMap from getTracksPrev()
// We have matched the tracks in the track matrix with the last->incoming tracks stored in the TracksMap from getTracksPrev()
// Now we need to add new tracks in the track matrix for the NEW tracks.
//
// Use book-keeping prepared in processKnown: the TracksMap that have been matched were stored in tracks_map_li_matched_
// and here add tracks only for those that have not been matched
unsigned int counter_new_tracks = 0;
for (std::pair<size_t,size_t> track_li: capture_image_incoming_->getTracksPrev()){
// if track not matched, then create a new track in the track matrix etc.
// if the track is not matched, then create a new track in the track matrix etc.
if (!tracks_map_li_matched_.count(track_li.first)){
// create a new last feature, a new track using this last feature and add the incoming feature to this track
WKeyPoint kp_last = capture_image_last_->getKeyPoints().at(track_li.first);
WKeyPoint kp_inco = capture_image_incoming_->getKeyPoints().at(track_li.second);
FeaturePointImagePtr feat_pi_last = FeatureBase::emplace<FeaturePointImage>(capture_image_last_, kp_last, pixel_cov_);
FeaturePointImagePtr feat_pi_inco = FeatureBase::emplace<FeaturePointImage>(capture_image_incoming_, kp_inco, pixel_cov_);
track_matrix_.newTrack(feat_pi_last);
track_matrix_.newTrack(feat_pi_last); // newTrack also sets the track id of the feature internally
track_matrix_.add(feat_pi_last->trackId(), feat_pi_inco);
counter_new_tracks++;
}
......@@ -406,17 +309,28 @@ unsigned int ProcessorVisualOdometry::processNew(const int& _max_features)
}
bool ProcessorVisualOdometry::new_landmark_is_viable(int track_id)
{
// a track should be old enough so that the keypoints can be considered stable
bool track_old_enough = track_matrix_.trackSize(track_id) >= params_visual_odometry_->lmk_creation.min_track_length_for_landmark;
// Check if enough parallax has appeared while the camera moves through the scene so that the landmark can be safely initialized.
// As a heuristic, we can check that the pixel distance between the first detection time and the current time is over a threshold.
double dist_pix = (track_matrix_.firstFeature(track_id)->getMeasurement() - track_matrix_.lastFeature(track_id)->getMeasurement()).norm();
bool enough_parallax = dist_pix > params_visual_odometry_->lmk_creation.min_pixel_dist;
return track_old_enough && enough_parallax;
}
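// Worked example with the demo yaml values (min_track_length_for_landmark: 3,
// min_pixel_dist: 0.0): a track of 4 features whose first and last detections
// lie 12 px apart gives track_old_enough = true and enough_parallax = true,
// so the track is viable; with min_pixel_dist = 0.0 the parallax check is
// effectively disabled.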
void ProcessorVisualOdometry::establishFactors()
{
// Function only called when KF is created using last
// Loop over the snapshot in corresponding to last capture. Does 2 things:
// Function only called when a KF is created at the timestamp of last
// Loop over the snapshot of last capture. Does 2 things:
// 1) for tracks already associated to a landmark, create a KF-Lmk factor between the last KF and the landmark.
// 2) if the feature track is not associated to a landmark yet and is long enough, create a new landmark
// using triangulation as a prior, using previous KF current estimates. Create a KF-Lmk factor for all these KFs.
// For bookkeeping, define the landmark id as the track id.
// 2) if the feature track is not associated to a landmark yet and is long enough, create a new landmark.
// Create a KF-Lmk factor for all these KFs.
WOLF_INFO(" establishFactors")
std::list<FeatureBasePtr> tracks_snapshot_last = track_matrix_.snapshotAsList(last_ptr_);
......@@ -430,20 +344,26 @@ void ProcessorVisualOdometry::establishFactors()
{
auto feat_pi = std::static_pointer_cast<FeaturePointImage>(feat);
// verify if a landmark is associated to this track (BAD implementation)
// verify if a landmark is associated to this track
// TODO: use std::find_if instead
LandmarkBasePtr associated_lmk = nullptr;
// linear search on the landmark ids to get the right landmark -> BAD
// An std::map for the Landmark Map would help greatly here
// OPTIM: try to reverse iterate the map?
for (auto lmk: getProblem()->getMap()->getLandmarkList())
{
if (lmk->id() == feat_pi->trackId()){
if (lmk->id() == feat_pi->landmarkId()){
associated_lmk = lmk;
break;
}
}
// 1) create a factor between the new KF and the associated track landmark
// HYP: assuming the track ids are the same as the landmark IDs -> BAD if other types of landmarks are involved
if (associated_lmk)
{
LandmarkHpPtr associated_lmk_hp = std::dynamic_pointer_cast<LandmarkHp>(associated_lmk);
assert(associated_lmk_hp && "Selected landmark is not a visual odometry landmark!");
FactorBase::emplace<FactorPixelHp>(feat_pi,
feat_pi,
associated_lmk_hp,
......@@ -451,15 +371,22 @@ void ProcessorVisualOdometry::establishFactors()
params_visual_odometry_->apply_loss_function);
}
// 2) create landmark if track is not associated with one and has enough length
else if(track_matrix_.trackSize(feat->trackId()) >= params_visual_odometry_->min_track_length_for_landmark)
// 2) create a landmark if the track is not associated with one and meets certain criteria
else if(new_landmark_is_viable(feat->trackId()))
{
// std::cout << "NEW valid track \\o/" << std::endl;
LandmarkBasePtr lmk = emplaceLandmark(feat_pi);
lmk->setId(feat_pi->trackId());
// Add factors from all KFs of this track to the new lmk
Track track_kf = track_matrix_.trackAtKeyframes(feat->trackId());
// LandmarkBasePtr lmk = emplaceLandmarkTriangulation(feat_pi, track_kf);
LandmarkBasePtr lmk = emplaceLandmarkNaive(feat_pi);
// Associate all features of the track to the landmark
// (A simpler option would be to set the landmark ID equal to the track ID
// but this would create conflicts if landmarks are created by other processors -> not good)
for (auto feat_track: track_matrix_.track(feat->trackId()))
{
feat_track.second->setLandmarkId(lmk->id());
}
for (auto feat_kf: track_kf)
{
LandmarkHpPtr lmk_hp = std::dynamic_pointer_cast<LandmarkHp>(lmk);
......@@ -474,12 +401,93 @@ void ProcessorVisualOdometry::establishFactors()
return;
}
LandmarkBasePtr ProcessorVisualOdometry::emplaceLandmark(FeatureBasePtr _feat)
LandmarkBasePtr ProcessorVisualOdometry::emplaceLandmarkTriangulation(FeaturePointImagePtr _feat, Track _track_kf)
{
///////////////
// NOT USED YET
///////////////
// at least 2 KF needed to triangulate
if (_track_kf.size() < 2)
{
return nullptr;
}
Vector4d pt_c;
bool success = tryTriangulationFromFeatures(_feat, _track_kf, pt_c);
if (!success)
{
return nullptr;
}
WOLF_INFO("New lmk: ", pt_c.transpose())
auto lmk_hp_ptr = LandmarkBase::emplace<LandmarkHp>(getProblem()->getMap(),
pt_c,
_feat->getKeyPoint().getDescriptor());
// // Set all IDs equal to track ID
// size_t track_id = _feat->trackId();
// lmk_hp_ptr->setId(track_id);
// _feat->setLandmarkId(track_id);
return lmk_hp_ptr;
}
bool ProcessorVisualOdometry::tryTriangulationFromFeatures(FeaturePointImagePtr _feat, Track _track_kf, Vector4d& pt_c)
{
// heuristic: use oldest feature/KF to triangulate with respect to current time
FeaturePointImagePtr feat_pi1 = std::static_pointer_cast<FeaturePointImage>(_track_kf.begin()->second);
cv::Vec2d pix1, pix2;
// WOLF_INFO("TOTO")
cv::eigen2cv(feat_pi1->getMeasurement(), pix1);
cv::eigen2cv(_feat->getMeasurement(), pix2);
// WOLF_INFO(pix1, pix2)
// create 3x4 projection matrices [K|0] * Tcw
Matrix4d Tcw1 = getTcw(feat_pi1->getCapture()->getTimeStamp()).matrix();
Matrix4d Tcw2 = getTcw(_feat->getCapture()->getTimeStamp()).matrix();
Eigen::Matrix<double, 3, 4> P1 = sen_cam_->getProjectionMatrix() * Tcw1;
Eigen::Matrix<double, 3, 4> P2 = sen_cam_->getProjectionMatrix() * Tcw2;
cv::Mat P1_cv, P2_cv;
cv::eigen2cv(P1, P1_cv);
cv::eigen2cv(P2, P2_cv);
// WOLF_INFO(P1)
// WOLF_INFO(P1_cv)
// perform triangulation of one landmark
cv::Vec4d ptcv_w; // output: homogeneous landmark point expressed in world frame
cv::triangulatePoints(P1_cv, P2_cv, pix1, pix2, ptcv_w);
// WOLF_INFO("YAY: ", ptcv_w)
/////////////////////////////////////////////////////////
// check that triangulation was done with enough parallax
/////////////////////////////////////////////////////////
bool triangulation_is_a_success = true; // Not implemented yet
if(!triangulation_is_a_success)
{
return false;
}
// convert the homogeneous point to Eigen
cv::cv2eigen(ptcv_w, pt_c);
// HACK: to avoid "nans" in the residual, set Z = 1
// pt_c(2) = 1 * pt_c(3);
// Normalization necessary since homogeneous point stateblock is supposed to be unitary
pt_c.normalize();
return true;
}
LandmarkBasePtr ProcessorVisualOdometry::emplaceLandmarkNaive(FeaturePointImagePtr _feat)
{
// Taken from processor_bundle_adjustment
// Initialize the landmark on its ray (based on the pixel measurement) at an arbitrary distance
FeaturePointImagePtr feat_pi = std::static_pointer_cast<FeaturePointImage>(_feat);
Eigen::Vector2d point2d = _feat->getMeasurement();
Eigen::Vector3d point3d;
......@@ -495,7 +503,7 @@ LandmarkBasePtr ProcessorVisualOdometry::emplaceLandmark(FeatureBasePtr _feat)
// lmk from camera to world coordinate frame.
Transform<double,3,Isometry> T_w_r
= Translation<double,3>(feat_pi->getFrame()->getP()->getState())
= Translation<double,3>(_feat->getFrame()->getP()->getState())
* Quaterniond(_feat->getFrame()->getO()->getState().data());
Transform<double,3,Isometry> T_r_c
= Translation<double,3>(_feat->getCapture()->getSensorP()->getState())
......@@ -509,23 +517,36 @@ LandmarkBasePtr ProcessorVisualOdometry::emplaceLandmark(FeatureBasePtr _feat)
auto lmk_hp_ptr = LandmarkBase::emplace<LandmarkHp>(getProblem()->getMap(),
vec_homogeneous_w,
feat_pi->getKeyPoint().getDescriptor());
_feat->getKeyPoint().getDescriptor());
return lmk_hp_ptr;
}
// Set all IDs equal to track ID
size_t track_id = _feat->trackId();
lmk_hp_ptr->setId(track_id);
_feat->setLandmarkId(track_id);
return lmk_hp_ptr;
Eigen::Isometry3d ProcessorVisualOdometry::getTcw(TimeStamp _ts) const
{
// get robot position and orientation at capture's timestamp
const auto& state = getProblem()->getState(_ts, "PO");
const auto& pos = state.at('P');
const auto& ori = state.at('O');
// compose the camera-to-world transform
Eigen::Isometry3d T_w_r = Translation3d(pos) * Quaterniond(ori.data());
Eigen::Isometry3d T_r_c = Translation3d(getSensor()->getP()->getState())
* Quaterniond(getSensor()->getO()->getState().data());
// invert to get the world-to-camera transform and return
return (T_w_r * T_r_c).inverse();
}
void ProcessorVisualOdometry::postProcess()
{
// Delete tracks with no keyframes
// Delete all dead tracks
auto snap_last = track_matrix_.snapshot(capture_image_last_);
for (const auto& track_id : track_matrix_.trackIds())
{
if (track_matrix_.trackAtKeyframes(track_id).empty())
if (snap_last.count(track_id) == 0)
track_matrix_.remove(track_id);
}
......@@ -575,13 +596,6 @@ void ProcessorVisualOdometry::resetDerived()
tracks_map_li_matched_.clear();
}
void ProcessorVisualOdometry::setParams(const ParamsProcessorVisualOdometryPtr _params)
{
params_visual_odometry_ = _params;
}
TracksMap ProcessorVisualOdometry::kltTrack(const cv::Mat _img_prev, const cv::Mat _img_curr, const KeyPointsMap &_mwkps_prev, KeyPointsMap &_mwkps_curr)
{
if (_mwkps_prev.empty()) return TracksMap();
......@@ -652,8 +666,19 @@ TracksMap ProcessorVisualOdometry::kltTrack(const cv::Mat _img_prev, const cv::M
return tracks_prev_curr;
}
bool ProcessorVisualOdometry::filterWithEssential(const KeyPointsMap _mwkps_prev, const KeyPointsMap _mwkps_curr, TracksMap &_tracks_prev_curr, cv::Mat &_E)
bool ProcessorVisualOdometry::filterWithEssential(const KeyPointsMap _mwkps_prev,
const KeyPointsMap _mwkps_curr,
TracksMap &_tracks_prev_curr,
cv::Mat &_E,
VectorComposite &_pose_prev_curr)
{
// TODO: Check new RANSAC methods introduced in opencv 4.5.0
// https://opencv.org/evaluating-opencvs-new-ransacs/
// We need at least five tracks
// TODO: this case is NOT handled by the rest of the algorithm currently
if (_tracks_prev_curr.size() < 5) return false;
ParamsProcessorVisualOdometry::RansacParams prms = params_visual_odometry_->ransac;
// We need to build lists of pt2d for openCV function
......@@ -661,28 +686,54 @@ bool ProcessorVisualOdometry::filterWithEssential(const KeyPointsMap _mwkps_prev
std::vector<size_t> all_indices;
for (auto & track : _tracks_prev_curr){
all_indices.push_back(track.first);
Eigen::Vector2d ray_prev = pinhole::depixellizePoint(sen_cam_->getPinholeModel(), _mwkps_prev.at(track.first).getEigenKeyPoint());
Eigen::Vector2d ray_curr = pinhole::depixellizePoint(sen_cam_->getPinholeModel(), _mwkps_curr.at(track.second).getEigenKeyPoint());
p2d_prev.push_back(cv::Point2d(ray_prev.x(), ray_prev.y()));
p2d_curr.push_back(cv::Point2d(ray_curr.x(), ray_curr.y()));
}
// We need at least five tracks
if (p2d_prev.size() < 5) return false;
// ////////////////////////
// // We may want to use rays instead of pixels
// Eigen::Vector2d ray_prev = pinhole::depixellizePoint(sen_cam_->getPinholeModel(), _mwkps_prev.at(track.first).getEigenKeyPoint());
// Eigen::Vector2d ray_curr = pinhole::depixellizePoint(sen_cam_->getPinholeModel(), _mwkps_curr.at(track.second).getEigenKeyPoint());
// p2d_prev.push_back(cv::Point2d(ray_prev.x(), ray_prev.y()));
// p2d_curr.push_back(cv::Point2d(ray_curr.x(), ray_curr.y()));
// ////////////////////////
// use pixels
p2d_prev.push_back(_mwkps_prev.at(track.first).getCvPoint());
p2d_curr.push_back(_mwkps_curr.at(track.second).getCvPoint());
}
cv::Mat cvMask;
cv::Mat K = cv::Mat::eye(3,3,CV_32F);
double focal = (sen_cam_->getIntrinsicMatrix()(0,0) +
sen_cam_->getIntrinsicMatrix()(1,1)) / 2;
// ////////////////////////
// // If we use rays then the camera matrix to pass is the identity
// cv::Mat K = cv::Mat::eye(3,3,CV_32F);
// double focal = (sen_cam_->getIntrinsicMatrix()(0,0) +
// sen_cam_->getIntrinsicMatrix()(1,1)) / 2;
// // If using rays, thresh divided by the average focal
// _E = cv::findEssentialMat(p2d_prev,
// p2d_curr,
// K,
// cv::RANSAC,
// prms.prob,
// prms.thresh / focal,
// cvMask);
// ////////////////////////
// Essential matrix from pixels
_E = cv::findEssentialMat(p2d_prev,
p2d_curr,
Kcv_,
cv::RANSAC,
prms.prob,
prms.thresh / focal,
prms.thresh,
cvMask);
// recover the relative pose if asked
if (_pose_prev_curr.includesStructure("PO")){
// clone the mask so as not to modify it; otherwise the mask seems to end up marking all tracks as outliers...
cv::Mat cvMask_dummy = cvMask.clone();
_pose_prev_curr = pose_from_essential_matrix(_E, p2d_prev, p2d_curr, Kcv_, cvMask_dummy);
}
// Let's remove outliers from tracksMap
for (size_t k=0; k<all_indices.size(); k++){
if (cvMask.at<bool>(k) == 0){
......@@ -693,6 +744,29 @@ bool ProcessorVisualOdometry::filterWithEssential(const KeyPointsMap _mwkps_prev
return true;
}
VectorComposite ProcessorVisualOdometry::pose_from_essential_matrix(const cv::Mat& _E,
const std::vector<cv::Point2d>& _p2d_prev,
const std::vector<cv::Point2d>& _p2d_curr,
const cv::Mat& _K,
cv::Mat& _cvMask)
{
cv::Mat R_out, t_out;
cv::recoverPose(_E, _p2d_prev, _p2d_curr, _K, R_out, t_out, _cvMask);
// convert to Eigen objects
Eigen::Matrix3d R_eig;
Eigen::Vector3d t_eig;
cv::cv2eigen(R_out, R_eig);
cv::cv2eigen(t_out, t_eig);
VectorComposite pose_prev_curr("PO", {3,4});
pose_prev_curr['P'] = t_eig;
pose_prev_curr['O'] = Quaterniond(R_eig).coeffs();
return pose_prev_curr;
}
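// Note: the translation recovered from an essential matrix is known only up to
// scale (cv::recoverPose returns a unit-norm t), so the 'P' block above fixes
// an arbitrary scale; only the orientation 'O' is metrically meaningful.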
void ProcessorVisualOdometry::retainBest(std::vector<cv::KeyPoint> &_keypoints, int n)
{
if (_keypoints.size() > n) {
......@@ -706,6 +780,229 @@ void ProcessorVisualOdometry::retainBest(std::vector<cv::KeyPoint> &_keypoints,
}
}
void ProcessorVisualOdometry::equalize_img(cv::Mat &img_incoming)
{
switch (params_visual_odometry_->equalization.method)
{
case 0:
break;
case 1:
{
// average to central brightness
auto img_avg = (cv::mean(img_incoming)).val[0];
img_incoming += cv::Scalar(round(params_visual_odometry_->equalization.average.median - img_avg) );
break;
}
case 2:
{
cv::equalizeHist( img_incoming, img_incoming );
break;
}
case 3:
{
// Contrast Limited Adaptive Histogram Equalization CLAHE
// -> more continuous lighting and higher contrast images
cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE(params_visual_odometry_->equalization.clahe.clip_limit,
params_visual_odometry_->equalization.clahe.tile_grid_size);
clahe->apply(img_incoming, img_incoming);
break;
}
}
}
void ProcessorVisualOdometry::detect_keypoints_empty_grid(cv::Mat img_incoming, CaptureImagePtr capture_image_incoming)
{
// detect one FAST keypoint in each cell of the grid
cv::Rect rect_roi;
Eigen::Vector2i cell_index;
std::vector<cv::KeyPoint> kps_roi;
for (int i=1; i < params_visual_odometry_->grid.nbr_cells_h-1; i++){
for (int j=1; j < params_visual_odometry_->grid.nbr_cells_v-1; j++){
cell_index << i,j;
cell_grid_.cell2roi(cell_index, rect_roi);
cv::Mat img_roi(img_incoming, rect_roi); // no data copy -> no overhead
detector_->detect(img_roi, kps_roi);
if (kps_roi.size() > 0){
// retain only the best keypoint in each region of interest
retainBest(kps_roi, 1);
// Keypoints are detected in the local coordinates of the region of interest
// -> translate to the full image corner coordinate system
kps_roi.at(0).pt.x = kps_roi.at(0).pt.x + rect_roi.x;
kps_roi.at(0).pt.y = kps_roi.at(0).pt.y + rect_roi.y;
capture_image_incoming->addKeyPoint(kps_roi.at(0));
}
}
}
}
void ProcessorVisualOdometry::filter_last_incoming_tracks_from_ransac_result(const TracksMap& tracks_last_incoming, const KeyPointsMap& mwkps_incoming, const TracksMap& tracks_origin_incoming,
TracksMap& tracks_last_incoming_filtered, KeyPointsMap& mwkps_incoming_filtered)
{
/*
Filter the last -> incoming tracks (and the tracked keypoint map in incoming)
L -------> I
based on the tracks alive after the RANSAC check on origin -> incoming
O ---------------------> I
This ugly double loop is due to the fact that feature ids in incoming are in the "values" of both maps, which requires a linear search.
Ideally, this could be solved with a Boost.Bimap.
*/
for (auto & track_origin_incoming : tracks_origin_incoming){
for (auto & track_last_incoming : tracks_last_incoming){
if (track_origin_incoming.second == track_last_incoming.second){
tracks_last_incoming_filtered[track_last_incoming.first] = track_last_incoming.second;
mwkps_incoming_filtered[track_last_incoming.second] = mwkps_incoming.at(track_last_incoming.second);
continue;
}
}
}
}
void ProcessorVisualOdometry::detect_keypoints_in_empty_grid_cells(cv::Mat img_last, const TracksMap& tracks_last_incoming_filtered, const KeyPointsMap& mwkps_last,
std::vector<cv::KeyPoint>& kps_last_new, KeyPointsMap& mwkps_last_filtered)
{
// Erase all keypoints previously added to the cell grid
cell_grid_.renew();
// Add last Keypoints that still form valid tracks between last and incoming
// And create a filtered map for last keypoints
for (auto track: tracks_last_incoming_filtered){
mwkps_last_filtered[track.first] = mwkps_last.at(track.first);
size_t last_kp_id = track.first;
cell_grid_.hitCell(capture_image_last_->getKeyPoints().at(last_kp_id).getCvKeyPoint());
}
// Use the grid to detect new keypoints in empty cells
// We try a bunch of times to add keypoints to randomly selected empty regions of interest
for (int i=0; i < params_visual_odometry_->max_new_features; i++){
cv::Rect rect_roi;
bool is_empty = cell_grid_.pickRoi(rect_roi);
if (!is_empty) // no empty cells!
{
break;
}
cv::Mat img_roi(img_last, rect_roi); // no data copy -> no overhead
std::vector<cv::KeyPoint> kps_roi;
detector_->detect(img_roi, kps_roi);
if (kps_roi.size() > 0){
// retain only the best keypoint in each region of interest
retainBest(kps_roi, 1);
// Keypoints are detected in the local coordinates of the region of interest
// -> translate to the full image corner coordinate system
kps_roi.at(0).pt.x = kps_roi.at(0).pt.x + rect_roi.x;
kps_roi.at(0).pt.y = kps_roi.at(0).pt.y + rect_roi.y;
kps_last_new.push_back(kps_roi.at(0));
// update grid with this detection
cell_grid_.hitCell(kps_roi.at(0));
}
else
{
// block this grid's cell so that it is not reused for detection
cell_grid_.blockCell(rect_roi);
}
}
}
/////////////////////////
// MotionProvider methods
/////////////////////////
VectorComposite ProcessorVisualOdometry::getState( const StateStructure& _structure ) const
{
return getState(getTimeStamp(), _structure);
}
TimeStamp ProcessorVisualOdometry::getTimeStamp() const
{
if ( last_ptr_ == nullptr )
return TimeStamp::Invalid();
else
return last_ptr_->getTimeStamp();
}
VectorComposite ProcessorVisualOdometry::getState(const TimeStamp& _ts, const StateStructure& _structure) const
{
// valid last...
if (last_ptr_ != origin_ptr_)
{
std::shared_ptr<VectorComposite> pose_cam_ol_ptr = buffer_pose_cam_ol_.selectFirstBefore(_ts, getTimeTolerance());
if( !pose_cam_ol_ptr )
{
WOLF_WARN(getName(), ": Requested state with time stamp out of tolerance. Returning empty VectorComposite");
return VectorComposite();
}
else
{
return *pose_cam_ol_ptr;
}
}
// return state at origin if possible
else
{
if (origin_ptr_ == nullptr || fabs(_ts - origin_ptr_->getTimeStamp()) > params_->time_tolerance)
{
WOLF_WARN(getName(), ": Requested state with time stamp out of tolerance. Returning empty VectorComposite");
return VectorComposite();
}
else
return origin_ptr_->getFrame()->getState("PO");
}
}
VectorComposite ProcessorVisualOdometry::getStateFromRelativeOriginLast(VectorComposite co_pose_cl) const
{
if (origin_ptr_ == nullptr or
origin_ptr_->isRemoving() or
last_ptr_ == nullptr or
last_ptr_->getFrame() == nullptr) // We do not have any info of where to find a valid state
// Further checking here for origin_ptr is redundant: if last=null, then origin=null too.
{
WOLF_WARN("Processor has no state. Returning an empty VectorComposite");
return VectorComposite(); // return empty state
}
VectorComposite r_pose_c = getSensor()->getState();
VectorComposite c_pose_r = inverse(r_pose_c);
// compose poses to obtain the relative robot transformation
VectorComposite ro_pose_rl = SE3::compose(r_pose_c, SE3::compose(co_pose_cl, c_pose_r));
// Pose at origin
VectorComposite w_pose_ro = getOrigin()->getFrame()->getState();
// Pose at last using composition
VectorComposite w_pose_rl = SE3::compose(w_pose_ro, ro_pose_rl);
WOLF_INFO("I WAS CALLED!!!")
return w_pose_rl;
}
} //namespace wolf
// Register in the FactoryProcessor
......
......@@ -60,7 +60,7 @@ class CaptureImage_test : public testing::Test
cv_kp2_ = cv::KeyPoint(2.0, 0.0, 0);
wkp0_ = WKeyPoint(cv_kp0_);
wkp1_ = WKeyPoint (cv_kp1_);
wkp2_ = WKeyPoint (cv_kp2_);
wkp2_ = WKeyPoint (Eigen::Vector2d(cv_kp2_.pt.x, cv_kp2_.pt.y));
}
};
......
......@@ -27,12 +27,17 @@
*/
#include <string>
#include <Eigen/Dense>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/core/eigen.hpp>
#include <core/utils/utils_gtest.h>
#include <core/sensor/sensor_base.h>
#include <core/yaml/parser_yaml.h>
#include <opencv2/imgcodecs.hpp>
#include "core/math/rotations.h"
#include <core/math/rotations.h>
#include <core/processor/track_matrix.h>
#include "vision/processor/processor_visual_odometry.h"
#include "vision/sensor/sensor_camera.h"
......@@ -41,16 +46,18 @@
#include "vision/capture/capture_image.h"
#include "vision/internal/config.h"
using namespace wolf;
using namespace cv;
std::string wolf_vision_root = _WOLF_VISION_ROOT_DIR;
class ProcessorVisualOdometryTest : public ProcessorVisualOdometry
WOLF_PTR_TYPEDEFS(ProcessorVisualOdometry_Wrapper);
class ProcessorVisualOdometry_Wrapper : public ProcessorVisualOdometry
{
public:
ProcessorVisualOdometryTest(ParamsProcessorVisualOdometryPtr& _params_vo):
ProcessorVisualOdometry_Wrapper(ParamsProcessorVisualOdometryPtr& _params_vo):
ProcessorVisualOdometry(_params_vo)
{
......@@ -82,6 +89,12 @@ class ProcessorVisualOdometryTest : public ProcessorVisualOdometry
}
};
// namespace wolf{
// // Register in the Factories
// WOLF_REGISTER_PROCESSOR(ProcessorVisualOdometry_Wrapper);
// }
// ////////////////////////////////////////////////////////////////
TEST(ProcessorVisualOdometry, kltTrack)
{
cv::Mat img = cv::imread(wolf_vision_root + "/test/markers.jpg", cv::IMREAD_GRAYSCALE);
......@@ -109,7 +122,7 @@ TEST(ProcessorVisualOdometry, kltTrack)
params->fast.threshold = 30;
params->fast.non_max_suppresion = true;
ProcessorVisualOdometryTest processor(params);
ProcessorVisualOdometry_Wrapper processor(params);
cv::Ptr<cv::FeatureDetector> detector = processor.getDetector();
std::vector<cv::KeyPoint> kps;
......@@ -151,7 +164,7 @@ TEST(ProcessorVisualOdometry, kltTrack)
// params->min_features_for_keyframe = 50;
// params->max_nb_tracks = 400;
// params->min_track_length_for_landmark = 4;
// ProcessorVisualOdometryTest processor(params);
// ProcessorVisualOdometry_Wrapper processor(params);
//
//
// // Install camera
......@@ -214,7 +227,7 @@ TEST(ProcessorVisualOdometry, kltTrack)
// static_cast<float>(capture_image_incoming->getKeyPoints().size());
// ASSERT_TRUE(track_ratio > 0.8);
//
// // Check if tracks_prev and tracks_origin are similar
// // Check if tracks_c1 and tracks_origin are similar
// ASSERT_EQ(capture_image_incoming->getTracksPrev().size(), capture_image_incoming->getTracksOrigin().size());
//
//}
......@@ -265,7 +278,7 @@ TEST(ProcessorVisualOdometry, kltTrack)
// capture_image_1->process();
//
// ASSERT_EQ(proc_vo_ptr->getTrackMatrix().numTracks(),capture_image_1->getTracksPrev().size());
// // Check if tracks_prev and tracks_origin are similar
// // Check if tracks_c1 and tracks_origin are similar
// ASSERT_EQ(capture_image_1->getTracksPrev().size(), capture_image_1->getTracksOrigin().size());
//
// // ----------------------------------------
......@@ -320,100 +333,226 @@ TEST(ProcessorVisualOdometry, mergeTracks)
}
cv::Point2f project(Eigen::Matrix3f K, Eigen::Vector3f p){
Eigen::Vector3f ph = K * p;
ph = ph / ph(2,0);
return cv::Point2f(ph(0), ph(1));
////////////////////////////////////////////////////////
////////////////////////////////////////////////////////
// Now we deal with multiview geometry so it helps to
// create a map of landmarks to project them
////////////////////////////////////////////////////////
////////////////////////////////////////////////////////
class ProcessorVOMultiView_class : public testing::Test{
public:
KeyPointsMap mwkps_c1, mwkps_c2;
TracksMap tracks_c1_c2;
ProcessorVisualOdometry_WrapperPtr processor;
Eigen::Vector3d t_21;
Eigen::Matrix3d R_21;
Eigen::Vector4d k;
cv::Mat Kcv;
void SetUp() override
{
k << 376, 240, 460, 460;
Kcv = (cv::Mat_<double>(3,3) << k(2), 0, k(0),
0, k(3), k(1),
0, 0, 1);
// Create a processor
ParamsProcessorVisualOdometryPtr params = std::make_shared<ParamsProcessorVisualOdometry>();
params->grid.margin = 10;
params->grid.nbr_cells_h = 8;
params->grid.nbr_cells_v = 8;
params->grid.separation = 10;
params->ransac.prob = 0.999; // 0.99 makes the gtest fail -> missing one point
params->ransac.thresh = 1.0;
processor = std::make_shared<ProcessorVisualOdometry_Wrapper>(params);
// Install camera
ParamsSensorCameraPtr intr = std::make_shared<ParamsSensorCamera>();
intr->pinhole_model_raw = k; // Necessary so that the internal Kcv camera matrix is configured properly
intr->distortion = Eigen::Vector4d::Zero();
intr->width = 752;
intr->height = 480;
SensorCameraPtr cam_ptr = std::make_shared<SensorCamera>((Eigen::Vector7d() << 0,0,0, 0,0,0,1).finished(), intr);
processor->configure(cam_ptr);
// Set 3D points on the previous camera frame -> all in front of the camera is better
Eigen::Vector3d X1_c1(1.0, 1.0, 2.0);
Eigen::Vector3d X2_c1(-1.0, -1.0, 2.0);
Eigen::Vector3d X3_c1(-0.75, -0.75, 3.0);
Eigen::Vector3d X4_c1(-0.75, 0.75, 2.5);
Eigen::Vector3d X5_c1(0.75, -0.75, 2.0);
Eigen::Vector3d X6_c1(0.0, 1.0, 2.0);
Eigen::Vector3d X7_c1(0.1, -1.0, 3.0);
Eigen::Vector3d X8_c1(0.75, 0.75, 2.0);
// Project pixels in the previous view
mwkps_c1.insert(std::pair<size_t, WKeyPoint>(0, WKeyPoint(pinhole::projectPoint(k, intr->distortion, X1_c1))));
mwkps_c1.insert(std::pair<size_t, WKeyPoint>(1, WKeyPoint(pinhole::projectPoint(k, intr->distortion, X2_c1))));
mwkps_c1.insert(std::pair<size_t, WKeyPoint>(2, WKeyPoint(pinhole::projectPoint(k, intr->distortion, X3_c1))));
mwkps_c1.insert(std::pair<size_t, WKeyPoint>(3, WKeyPoint(pinhole::projectPoint(k, intr->distortion, X4_c1))));
mwkps_c1.insert(std::pair<size_t, WKeyPoint>(4, WKeyPoint(pinhole::projectPoint(k, intr->distortion, X5_c1))));
mwkps_c1.insert(std::pair<size_t, WKeyPoint>(5, WKeyPoint(pinhole::projectPoint(k, intr->distortion, X6_c1))));
mwkps_c1.insert(std::pair<size_t, WKeyPoint>(6, WKeyPoint(pinhole::projectPoint(k, intr->distortion, X7_c1))));
mwkps_c1.insert(std::pair<size_t, WKeyPoint>(7, WKeyPoint(pinhole::projectPoint(k, intr->distortion, X8_c1))));
// Set the transformation between views 1 and 2
t_21 = Vector3d(0.1, 0.1, 0.0);
// Eigen::Vector3d euler(0.0, 0.0, 0.0); // degenerate case!
Eigen::Vector3d euler(0.2, 0.1, 0.3);
R_21 = e2R(euler);
mwkps_c2.insert(std::pair<size_t, WKeyPoint>(0, WKeyPoint(pinhole::projectPoint(k, intr->distortion, R_21*X1_c1 + t_21))));
mwkps_c2.insert(std::pair<size_t, WKeyPoint>(1, WKeyPoint(pinhole::projectPoint(k, intr->distortion, R_21*X2_c1 + t_21))));
mwkps_c2.insert(std::pair<size_t, WKeyPoint>(2, WKeyPoint(pinhole::projectPoint(k, intr->distortion, R_21*X3_c1 + t_21))));
mwkps_c2.insert(std::pair<size_t, WKeyPoint>(3, WKeyPoint(pinhole::projectPoint(k, intr->distortion, R_21*X4_c1 + t_21))));
mwkps_c2.insert(std::pair<size_t, WKeyPoint>(4, WKeyPoint(pinhole::projectPoint(k, intr->distortion, R_21*X5_c1 + t_21))));
mwkps_c2.insert(std::pair<size_t, WKeyPoint>(5, WKeyPoint(pinhole::projectPoint(k, intr->distortion, R_21*X6_c1 + t_21))));
mwkps_c2.insert(std::pair<size_t, WKeyPoint>(6, WKeyPoint(pinhole::projectPoint(k, intr->distortion, R_21*X7_c1 + t_21))));
mwkps_c2.insert(std::pair<size_t, WKeyPoint>(7, WKeyPoint(pinhole::projectPoint(k, intr->distortion, R_21*X8_c1 + t_21))));
// Build the tracksMap
for (size_t i=0; i<mwkps_c2.size(); ++i)
{
tracks_c1_c2.insert(std::pair<size_t, size_t>(i,i));
}
}
};
TEST_F(ProcessorVOMultiView_class, filterWithEssential)
{
cv::Mat E;
// Let's try FilterWithEssential, all points here are inliers
VectorComposite toto; // empty VectorComposite -> recoverPose is not called
bool success = processor->filterWithEssential(mwkps_c1, mwkps_c2, tracks_c1_c2, E, toto);
ASSERT_TRUE(success);
ASSERT_EQ(tracks_c1_c2.size(), mwkps_c2.size());
////////////////////////////////////////////////////////////////////
// We add here an outlier: a keypoint that doesn't move
mwkps_c1.insert(std::pair<size_t, WKeyPoint>(8, WKeyPoint(cv::KeyPoint(cv::Point2d(120,140), 1))));
mwkps_c2.insert(std::pair<size_t, WKeyPoint>(8, WKeyPoint(cv::KeyPoint(cv::Point2d(120,140), 1))));
tracks_c1_c2.insert(std::pair<size_t, size_t>(8,8));
// point at 8 must be discarded
success = processor->filterWithEssential(mwkps_c1, mwkps_c2, tracks_c1_c2, E, toto);
ASSERT_TRUE(success);
ASSERT_EQ(tracks_c1_c2.count(8), 0);
}
TEST(ProcessorVisualOdometry, filterWithEssential)
TEST_F(ProcessorVOMultiView_class, recoverPoseOpenCV)
{
KeyPointsMap mwkps_prev, mwkps_curr;
TracksMap tracks_prev_curr;
// Check that the computed essential matrix corresponds to the camera movement
// -> recover the orientation and compare to groundtruth
cv::Mat E;
// Create a simple camera model
Eigen::Matrix3f K;
K << 100, 0, 240,
0, 100, 240,
0, 0, 1;
// Create a processor
ParamsProcessorVisualOdometryPtr params = std::make_shared<ParamsProcessorVisualOdometry>();
params->grid.margin = 10;
params->grid.nbr_cells_h = 8;
params->grid.nbr_cells_v = 8;
params->grid.separation = 10;
params->ransac.prob = 0.999; // 0.99 makes the gtest fail -> missing one point
params->ransac.thresh = 1.0;
ProcessorVisualOdometryTest processor(params);
// Install camera
ParamsSensorCameraPtr intr = std::make_shared<ParamsSensorCamera>();
intr->pinhole_model_raw = Eigen::Vector4d(100, 100, 240, 240);
intr->width = 480;
intr->height = 480;
SensorCameraPtr cam_ptr = std::make_shared<SensorCamera>((Eigen::Vector7d() << 0,0,0, 0,0,0,1).finished(), intr);
processor.configure(cam_ptr);
// Set 3D points on the previous view
Eigen::Vector3f X1_prev(1.0, 1.0, 2.0);
Eigen::Vector3f X2_prev(-1.0, -1.0, 2.0);
Eigen::Vector3f X3_prev(-0.75, -0.75, 3.0);
Eigen::Vector3f X4_prev(-0.75, 0.75, 2.5);
Eigen::Vector3f X5_prev(0.75, -0.75, 2.0);
Eigen::Vector3f X6_prev(0.0, 1.0, 2.0);
Eigen::Vector3f X7_prev(0.1, -1.0, 3.0);
Eigen::Vector3f X8_prev(0.75, 0.75, 2.0);
// Project pixels in the previous view
mwkps_prev.insert(std::pair<size_t, WKeyPoint>(0, WKeyPoint(cv::KeyPoint(project(K,X1_prev), 1))));
mwkps_prev.insert(std::pair<size_t, WKeyPoint>(1, WKeyPoint(cv::KeyPoint(project(K,X2_prev), 1))));
mwkps_prev.insert(std::pair<size_t, WKeyPoint>(2, WKeyPoint(cv::KeyPoint(project(K,X3_prev), 1))));
mwkps_prev.insert(std::pair<size_t, WKeyPoint>(3, WKeyPoint(cv::KeyPoint(project(K,X4_prev), 1))));
mwkps_prev.insert(std::pair<size_t, WKeyPoint>(4, WKeyPoint(cv::KeyPoint(project(K,X5_prev), 1))));
mwkps_prev.insert(std::pair<size_t, WKeyPoint>(5, WKeyPoint(cv::KeyPoint(project(K,X6_prev), 1))));
mwkps_prev.insert(std::pair<size_t, WKeyPoint>(6, WKeyPoint(cv::KeyPoint(project(K,X7_prev), 1))));
mwkps_prev.insert(std::pair<size_t, WKeyPoint>(7, WKeyPoint(cv::KeyPoint(project(K,X8_prev), 1))));
// Set the transformation between the two views
Eigen::Vector3f t(0.1, 0.1, 0.0);
// Eigen::Vector3f euler(0.0, 0.0, 0.0); // degenerate case!
Eigen::Vector3f euler(0.2, 0.1, 0.3);
Eigen::Matrix3f R = e2R(euler);
// Compute essential matrix, all points here are inliers
VectorComposite titi;
processor->filterWithEssential(mwkps_c1, mwkps_c2, tracks_c1_c2, E, titi);
// Project pixels in the current view
mwkps_curr.insert(std::pair<size_t, WKeyPoint>(0, WKeyPoint(cv::KeyPoint(project(K,R*X1_prev + t), 1))));
mwkps_curr.insert(std::pair<size_t, WKeyPoint>(1, WKeyPoint(cv::KeyPoint(project(K,R*X2_prev + t), 1))));
mwkps_curr.insert(std::pair<size_t, WKeyPoint>(2, WKeyPoint(cv::KeyPoint(project(K,R*X3_prev + t), 1))));
mwkps_curr.insert(std::pair<size_t, WKeyPoint>(3, WKeyPoint(cv::KeyPoint(project(K,R*X4_prev + t), 1))));
mwkps_curr.insert(std::pair<size_t, WKeyPoint>(4, WKeyPoint(cv::KeyPoint(project(K,R*X5_prev + t), 1))));
mwkps_curr.insert(std::pair<size_t, WKeyPoint>(5, WKeyPoint(cv::KeyPoint(project(K,R*X6_prev + t), 1))));
mwkps_curr.insert(std::pair<size_t, WKeyPoint>(6, WKeyPoint(cv::KeyPoint(project(K,R*X7_prev + t), 1))));
mwkps_curr.insert(std::pair<size_t, WKeyPoint>(7, WKeyPoint(cv::KeyPoint(project(K,R*X8_prev + t), 1))));
// Build the tracksMap
for (size_t i=0; i<mwkps_curr.size(); ++i)
{
tracks_prev_curr.insert(std::pair<size_t, size_t>(i,i));
////////////////////////////////////////////////////////////////////
// can we retrieve the right orientation from the essential matrix?
std::vector<cv::Point2d> pts_c1, pts_c2;
for (int i=0; i < mwkps_c1.size(); i++){
pts_c1.push_back(mwkps_c1.at(i).getCvKeyPoint().pt);
pts_c2.push_back(mwkps_c2.at(i).getCvKeyPoint().pt);
}
// Let's try FilterWithEssential, all points here are inliers
processor.filterWithEssential(mwkps_prev, mwkps_curr, tracks_prev_curr, E);
ASSERT_EQ(tracks_prev_curr.size(), mwkps_curr.size());
// We add here an outlier: a keypoint that doesn't move
mwkps_prev.insert(std::pair<size_t, WKeyPoint>(8, WKeyPoint(cv::KeyPoint(cv::Point2d(120,140), 1))));
mwkps_curr.insert(std::pair<size_t, WKeyPoint>(8, WKeyPoint(cv::KeyPoint(cv::Point2d(120,140), 1))));
tracks_prev_curr.insert(std::pair<size_t, size_t>(8,8));
cv::Mat R_out, t_out;
cv::Mat mask;
cv::recoverPose(E, pts_c1, pts_c2, Kcv, R_out, t_out, mask);
// point at 8 must be discarded
processor.filterWithEssential(mwkps_prev, mwkps_curr, tracks_prev_curr, E);
ASSERT_EQ(tracks_prev_curr.count(8), 0);
Eigen::Matrix3d R_out_eig, R_21_eig;
cv::cv2eigen(R_out, R_out_eig);
ASSERT_MATRIX_APPROX(R_21, R_out_eig, 1e-4);
// Same with helper function
VectorComposite pose_21_out = ProcessorVisualOdometry::pose_from_essential_matrix(E, pts_c1, pts_c2, Kcv, mask);
//////////////////////////////////////////////////////
// Can we also use it to detect outliers using cheirality check?
// Does not seem so...
// Maybe the outliers are not well chosen for this test:
// inside recoverPose, triangulatePoints is called and then there are
// checks on the depth coordinate of the triangulated point in both camera frames:
// if the depth is negative or beyond a threshold, the point is considered an "outlier".
// This can simply mean an absence of movement, so the threshold is hard to tune...
// add outliers
pts_c1.push_back(cv::Point2d(165.0, 190.0));
pts_c2.push_back(cv::Point2d(200.0, 190.0));
pts_c1.push_back(cv::Point2d(100.0, 250.0));
pts_c2.push_back(cv::Point2d(100.0, 250.0));
pts_c1.push_back(cv::Point2d(400.0, 70.0));
pts_c2.push_back(cv::Point2d(400.0, 70.0));
pts_c1.push_back(cv::Point2d(300.0, 300.0));
pts_c2.push_back(cv::Point2d(100.0, 300.0));
mask = 255*cv::Mat::ones(12, 1, CV_64F);
cv::Mat triangulated_pts;
double distance_threshold = 80.0;
// cv::recoverPose(E, pts_c1, pts_c2, Kcv, R_out, t_out, mask);
cv::Mat new_mask;
cv::recoverPose(E, pts_c1, pts_c2, Kcv, R_out, t_out, distance_threshold, new_mask, triangulated_pts);
// triangulated_pts.size()
// std::cout << triangulated_pts.size() << std::endl;
// std::cout << "mask" << std::endl;
// std::cout << mask << std::endl;
// std::cout << "R_out" << std::endl;
// std::cout << R_out << std::endl;
// std::cout << "t_out" << std::endl;
// std::cout << t_out << std::endl;
}
// // SEGFAULT!!!!!!!!
// TEST_F(ProcessorVOMultiView_class, tryTriangulationFromFeatures)
// {
// ProblemPtr problem = Problem::create("PO", 3);
// VectorComposite state1("PO");
// state1['P'] = Vector3d::Zero();
// state1['O'] = Quaterniond::Identity().coeffs();
// FrameBasePtr KF1 = FrameBase::emplace<FrameBase>(problem->getTrajectory(), 0.0, "PO", state1);
// CaptureBasePtr c1 = CaptureBase::emplace<CaptureImage>(KF1, 0.0, nullptr, cv::Mat());
// FeatureBasePtr f1 = FeatureBase::emplace<FeaturePointImage>(c1, mwkps_c1.at(0), Matrix2d::Identity());
// VectorComposite state2("PO");
// state2['P'] = Vector3d::Zero();
// state2['O'] = Quaterniond::Identity().coeffs();
// FrameBasePtr KF2 = FrameBase::emplace<FrameBase>(problem->getTrajectory(), 1.0, "PO", state2);
// CaptureBasePtr c2 = CaptureBase::emplace<CaptureImage>(KF2, 1.0, nullptr, cv::Mat());
// FeatureBasePtr f2 = FeatureBase::emplace<FeaturePointImage>(c2, mwkps_c2.at(0), Matrix2d::Identity());
// Track track;
// track.insert(std::pair<TimeStamp, FeatureBasePtr>(0.0, f1));
// track.insert(std::pair<TimeStamp, FeatureBasePtr>(1.0, f2));
// Vector4d pt_c;
// auto f2_pi = std::static_pointer_cast<FeaturePointImage>(f2);
// processor->tryTriangulationFromFeatures(f2_pi, track, pt_c);
// WOLF_INFO(pt_c);
// }
int main(int argc, char **argv)
{
testing::InitGoogleTest(&argc, argv);
......
......@@ -52,5 +52,9 @@ max_nb_tracks: 30
# standard deviation of the pixel reprojection factor
std_pix: 1
# before creating a landmark, wait until the track is old enough
min_track_length_for_landmark: 20
# Rules to create a new landmark from a track
lmk_creation:
# wait until the track is old enough
min_track_length_for_landmark: 20
# enough pixel distance
min_pixel_dist: 0.0