diff --git a/include/vision/capture/capture_image.h b/include/vision/capture/capture_image.h
index c31f03fb61799b4e3fdf98d0fa787f1c59e2dda2..8af8156b1da6231ffaf8e4e78a3fc5a90d593a18 100644
--- a/include/vision/capture/capture_image.h
+++ b/include/vision/capture/capture_image.h
@@ -53,6 +53,7 @@ class WKeyPoint
 
         WKeyPoint();
         WKeyPoint(const cv::KeyPoint& _cv_kp);
+        WKeyPoint(const Eigen::Vector2d& _eig_kp);
 
         size_t getId() const {return id_;}
         void setId(size_t _id) {id_ = _id;}
diff --git a/include/vision/processor/processor_visual_odometry.h b/include/vision/processor/processor_visual_odometry.h
index 9c6fd947338f3a2f9513fa800cf0c22a4d23254f..d58369e4a99cf87c6c9966aea46d5ebeafa513d8 100644
--- a/include/vision/processor/processor_visual_odometry.h
+++ b/include/vision/processor/processor_visual_odometry.h
@@ -253,18 +253,27 @@ class ProcessorVisualOdometry : public ProcessorTracker
          * 
          * Implementation: Use rays of features detections in last frame and create a landmark at 1 meter (arbitrary) 
          */
-        LandmarkBasePtr emplaceLandmarkNaive(FeatureBasePtr _feature_ptr);
+        LandmarkBasePtr emplaceLandmarkNaive(FeaturePointImagePtr _feature_ptr);
 
         /**
          * \brief Emplace a landmark corresponding to a track and initialize it with triangulation.
          * \param _feature_ptr a pointer to the feature used to create the new landmark
          * \param _track_kf track with only features associated to keyframes that maye be used for initialisation
          * \return a pointer to the created landmark. If null, the triangulation failed due to low parallax
+         */
+        LandmarkBasePtr emplaceLandmarkTriangulation(FeaturePointImagePtr _feature_ptr, Track _track_kf);
+
+        /**
+         * \brief Try to triangulate a landmark from the features of a track using keyframe poses.
+         * \param _feat a pointer to the feature in the current frame used for triangulation
+         * \param _track_kf track with only features associated to keyframes that may be used for initialisation
+         * \return true if triangulation succeeded; the triangulated point (P3 homogeneous coordinates) is written to the output argument
          * 
          * Implementation: try to triangulate a new landmark based on previous frames estimates.
          * Apply a numerical test to asses if parallax is enough.
          */
-        LandmarkBasePtr emplaceLandmarkTriangulation(FeatureBasePtr _feature_ptr, Track _track_kf);
+        bool tryTriangulationFromFeatures(FeaturePointImagePtr _feat, Track _track_kf, Eigen::Vector4d&);
+
 
         Eigen::Isometry3d getTcw(TimeStamp _ts) const;
 
diff --git a/src/capture/capture_image.cpp b/src/capture/capture_image.cpp
index 50483ce754afc88cdb8e293c41b068598b064b9d..63bdb614427f23c59b9023df7b49b172f201cd58 100644
--- a/src/capture/capture_image.cpp
+++ b/src/capture/capture_image.cpp
@@ -39,7 +39,15 @@ WKeyPoint::WKeyPoint(const cv::KeyPoint& _cv_kp):
 {
 }
 
+WKeyPoint::WKeyPoint(const Eigen::Vector2d& _eig_kp):
+    id_(++id_count_)
+{
+    cv_kp_.pt = cv::Point2f(_eig_kp(0), _eig_kp(1));
+}
+
+
 
+/////////////////
 CaptureImage::CaptureImage(const TimeStamp& _ts, SensorCameraPtr _camera_ptr, const cv::Mat& _img) :
     CaptureBase("CaptureImage", _ts, _camera_ptr),
     img_(_img),
diff --git a/src/processor/processor_visual_odometry.cpp b/src/processor/processor_visual_odometry.cpp
index a8e398289a1bb892cac30fade2b081ea2106088c..e430256f357961da9c4b1d5e125bed28859b7eda 100644
--- a/src/processor/processor_visual_odometry.cpp
+++ b/src/processor/processor_visual_odometry.cpp
@@ -375,28 +375,48 @@ void ProcessorVisualOdometry::establishFactors()
 }
 
 
-LandmarkBasePtr ProcessorVisualOdometry::emplaceLandmarkTriangulation(FeatureBasePtr _feat, Track _track_kf)
+LandmarkBasePtr ProcessorVisualOdometry::emplaceLandmarkTriangulation(FeaturePointImagePtr _feat, Track _track_kf)
 {
     // at least 2 KF needed to triangulate
     if (_track_kf.size() < 2)
     {
         return nullptr;
     }
+    Vector4d pt_c;
+    bool success = tryTriangulationFromFeatures(_feat, _track_kf, pt_c);
+    if (!success)
+    {
+        return nullptr;
+    }
 
+    WOLF_INFO("New lmk: ", pt_c.transpose())
+    auto lmk_hp_ptr = LandmarkBase::emplace<LandmarkHp>(getProblem()->getMap(), 
+                                                        pt_c, 
+                                                        _feat->getKeyPoint().getDescriptor());
+
+    // Set all IDs equal to track ID
+    size_t track_id = _feat->trackId();
+    lmk_hp_ptr->setId(track_id);
+    _feat->setLandmarkId(track_id);
+
+    return lmk_hp_ptr;
+}
+
+
+bool ProcessorVisualOdometry::tryTriangulationFromFeatures(FeaturePointImagePtr _feat, Track _track_kf, Vector4d& pt_c)
+{
     // heuristic: use oldest feature/KF to triangulate with respect to current time
     FeaturePointImagePtr feat_pi1 = std::static_pointer_cast<FeaturePointImage>(_track_kf.begin()->second);
-    // Feature at current time
-    FeaturePointImagePtr feat_pi2 = std::static_pointer_cast<FeaturePointImage>(_feat);
 
     cv::Vec2d pix1, pix2;
     // WOLF_INFO("TOTO")
     cv::eigen2cv(feat_pi1->getMeasurement(), pix1);
-    cv::eigen2cv(feat_pi2->getMeasurement(), pix2);
+    cv::eigen2cv(_feat->getMeasurement(), pix2);
     // WOLF_INFO(pix1, pix2)
     
     // create 3x4 projection matrices [K|0] * Tcw 
     Matrix4d Tcw1 = getTcw(feat_pi1->getCapture()->getTimeStamp()).matrix();
-    Matrix4d Tcw2 = getTcw(feat_pi2->getCapture()->getTimeStamp()).matrix();
+    Matrix4d Tcw2 = getTcw(_feat->getCapture()->getTimeStamp()).matrix();
     Eigen::Matrix<double, 3, 4> P1 = sen_cam_->getProjectionMatrix() * Tcw1;
     Eigen::Matrix<double, 3, 4> P2 = sen_cam_->getProjectionMatrix() * Tcw2;
     cv::Mat P1_cv, P2_cv;
@@ -410,46 +430,33 @@ LandmarkBasePtr ProcessorVisualOdometry::emplaceLandmarkTriangulation(FeatureBas
     cv::triangulatePoints(P1_cv, P2_cv, pix1, pix2, ptcv_w);
     // WOLF_INFO("YAY: ", ptcv_w)
 
-
     /////////////////////////////////////////////////////////
     // check that triangulation was done with enough parallax   
     /////////////////////////////////////////////////////////
     bool triangulation_is_a_success = true;  // Not implemented yet
     if(!triangulation_is_a_success)
     {
-        return nullptr;
+        return false;
     }
 
     // normalize to make equivalent to a unit quaternion
-    Eigen::Vector4d pt_c;
     cv::cv2eigen(ptcv_w, pt_c);
     
     // HACK: to avoid "nans" in residal, set Z = 1
-    pt_c(2) = 1 * pt_c(3);
+    // pt_c(2) = 1 * pt_c(3);  // disabled: keep the true triangulated depth instead of forcing Z = 1
 
-    // Normilization necessary since homogeneous point stateblock is supposed to be unitary 
+    // Normalization necessary since homogeneous point stateblock is supposed to be unitary 
     pt_c.normalize();
 
-    WOLF_INFO("New lmk: ", pt_c.transpose())
-    auto lmk_hp_ptr = LandmarkBase::emplace<LandmarkHp>(getProblem()->getMap(), 
-                                                        pt_c, 
-                                                        feat_pi2->getKeyPoint().getDescriptor());
-
-    // Set all IDs equal to track ID
-    size_t track_id = _feat->trackId();
-    lmk_hp_ptr->setId(track_id);
-    _feat->setLandmarkId(track_id);
-
-    return lmk_hp_ptr;
+    return true;
 }
 
 
-LandmarkBasePtr ProcessorVisualOdometry::emplaceLandmarkNaive(FeatureBasePtr _feat)
+LandmarkBasePtr ProcessorVisualOdometry::emplaceLandmarkNaive(FeaturePointImagePtr _feat)
 {
     // Taken from processor_bundle_adjustment
     // Initialize the landmark in its ray (based on pixel meas) and using a arbitrary distance
 
-    FeaturePointImagePtr feat_pi = std::static_pointer_cast<FeaturePointImage>(_feat);
     Eigen::Vector2d point2d = _feat->getMeasurement();
 
     Eigen::Vector3d point3d;
@@ -465,7 +472,7 @@ LandmarkBasePtr ProcessorVisualOdometry::emplaceLandmarkNaive(FeatureBasePtr _fe
 
     // lmk from camera to world coordinate frame.
     Transform<double,3,Isometry> T_w_r
-        = Translation<double,3>(feat_pi->getFrame()->getP()->getState())
+        = Translation<double,3>(_feat->getFrame()->getP()->getState())
         * Quaterniond(_feat->getFrame()->getO()->getState().data());
     Transform<double,3,Isometry> T_r_c
 		= Translation<double,3>(_feat->getCapture()->getSensorP()->getState())
@@ -479,7 +486,7 @@ LandmarkBasePtr ProcessorVisualOdometry::emplaceLandmarkNaive(FeatureBasePtr _fe
 
     auto lmk_hp_ptr = LandmarkBase::emplace<LandmarkHp>(getProblem()->getMap(), 
                                                         vec_homogeneous_w, 
-                                                        feat_pi->getKeyPoint().getDescriptor());
+                                                        _feat->getKeyPoint().getDescriptor());
 
     // Set all IDs equal to track ID
     size_t track_id = _feat->trackId();
@@ -653,25 +660,47 @@ bool ProcessorVisualOdometry::filterWithEssential(const KeyPointsMap _mwkps_prev
     std::vector<size_t> all_indices;
     for (auto & track : _tracks_prev_curr){
         all_indices.push_back(track.first);
-        Eigen::Vector2d ray_prev = pinhole::depixellizePoint(sen_cam_->getPinholeModel(), _mwkps_prev.at(track.first).getEigenKeyPoint());
-        Eigen::Vector2d ray_curr = pinhole::depixellizePoint(sen_cam_->getPinholeModel(), _mwkps_curr.at(track.second).getEigenKeyPoint());
-        p2d_prev.push_back(cv::Point2d(ray_prev.x(), ray_prev.y()));
-        p2d_curr.push_back(cv::Point2d(ray_curr.x(), ray_curr.y()));
+
+        // ////////////////////////
+        // // We may want to use rays instead of pixels
+        // Eigen::Vector2d ray_prev = pinhole::depixellizePoint(sen_cam_->getPinholeModel(), _mwkps_prev.at(track.first).getEigenKeyPoint());
+        // Eigen::Vector2d ray_curr = pinhole::depixellizePoint(sen_cam_->getPinholeModel(), _mwkps_curr.at(track.second).getEigenKeyPoint());
+        // p2d_prev.push_back(cv::Point2d(ray_prev.x(), ray_prev.y()));
+        // p2d_curr.push_back(cv::Point2d(ray_curr.x(), ray_curr.y()));
+        // ////////////////////////
+
+        // use pixels 
+        p2d_prev.push_back(_mwkps_prev.at(track.first).getCvPoint());
+        p2d_curr.push_back(_mwkps_curr.at(track.second).getCvPoint());
     }
 
     cv::Mat cvMask;
-    cv::Mat K = cv::Mat::eye(3,3,CV_32F);
-    double focal = (sen_cam_->getIntrinsicMatrix()(0,0) +
-                    sen_cam_->getIntrinsicMatrix()(1,1)) / 2;
 
+    // ////////////////////////
+    // // If we use rays then the    
+    // cv::Mat K = cv::Mat::eye(3,3,CV_32F);
+    // double focal = (sen_cam_->getIntrinsicMatrix()(0,0) +
+    //                 sen_cam_->getIntrinsicMatrix()(1,1)) / 2;
+
+    // // If using rays, thresh dived by the average focal
+    // _E = cv::findEssentialMat(p2d_prev, 
+    //                           p2d_curr, 
+    //                           K, 
+    //                           cv::RANSAC,
+    //                           prms.prob,
+    //                           prms.thresh / focal,
+    //                           cvMask);
+    // ////////////////////////
+    
+    // Essential matrix from pixels
     _E = cv::findEssentialMat(p2d_prev, 
                               p2d_curr, 
                               Kcv_, 
                               cv::RANSAC,
                               prms.prob,
-                              prms.thresh / focal,
+                              prms.thresh,
                               cvMask);
-    
+
     // Let's remove outliers from tracksMap
     for (size_t k=0; k<all_indices.size(); k++){
         if (cvMask.at<bool>(k) == 0){
diff --git a/test/gtest_capture_image.cpp b/test/gtest_capture_image.cpp
index 76fc8e6fe8429ac040749a8e83e5000a6474d913..5ba8d359ba05f447728a15ec0eef8f9012f324d4 100644
--- a/test/gtest_capture_image.cpp
+++ b/test/gtest_capture_image.cpp
@@ -60,7 +60,7 @@ class CaptureImage_test : public testing::Test
             cv_kp2_ = cv::KeyPoint(2.0, 0.0, 0);
             wkp0_ = WKeyPoint(cv_kp0_);
             wkp1_ = WKeyPoint  (cv_kp1_);
-            wkp2_ = WKeyPoint  (cv_kp2_);
+            wkp2_ = WKeyPoint  (Eigen::Vector2d(cv_kp2_.pt.x, cv_kp2_.pt.y));
         }
 };
 
diff --git a/test/gtest_processor_visual_odometry.cpp b/test/gtest_processor_visual_odometry.cpp
index 2cb1b28a1f9a81c7d8162b97ad9b3a3607a58e39..d57f18376dd44555bd5a439b12acb27f417bcd7c 100644
--- a/test/gtest_processor_visual_odometry.cpp
+++ b/test/gtest_processor_visual_odometry.cpp
@@ -27,12 +27,17 @@
  */
 
 #include <string>
+
+#include <Eigen/Dense>
+
+#include <opencv2/imgcodecs.hpp>
+#include <opencv2/core/eigen.hpp>
+
 #include <core/utils/utils_gtest.h>
 #include <core/sensor/sensor_base.h>
 #include <core/yaml/parser_yaml.h>
-#include <opencv2/imgcodecs.hpp>
-
-#include "core/math/rotations.h"
+#include <core/math/rotations.h>
+#include <core/processor/track_matrix.h>
 
 #include "vision/processor/processor_visual_odometry.h"
 #include "vision/sensor/sensor_camera.h"
@@ -41,6 +46,7 @@
 #include "vision/capture/capture_image.h"
 #include "vision/internal/config.h"
 
+
 using namespace wolf;
 using namespace cv;
 
@@ -221,7 +227,7 @@ TEST(ProcessorVisualOdometry, kltTrack)
 //                        static_cast<float>(capture_image_incoming->getKeyPoints().size());
 //    ASSERT_TRUE(track_ratio > 0.8);
 //
-//    // Check if tracks_prev and tracks_origin are similar
+//    // Check if tracks_c1 and tracks_origin are similar
 //    ASSERT_EQ(capture_image_incoming->getTracksPrev().size(), capture_image_incoming->getTracksOrigin().size());
 //
 //}
@@ -272,7 +278,7 @@ TEST(ProcessorVisualOdometry, kltTrack)
 //    capture_image_1->process();
 //
 //    ASSERT_EQ(proc_vo_ptr->getTrackMatrix().numTracks(),capture_image_1->getTracksPrev().size());
-//    // Check if tracks_prev and tracks_origin are similar
+//    // Check if tracks_c1 and tracks_origin are similar
 //    ASSERT_EQ(capture_image_1->getTracksPrev().size(), capture_image_1->getTracksOrigin().size());
 //
 //    // ----------------------------------------
@@ -338,30 +344,22 @@ TEST(ProcessorVisualOdometry, mergeTracks)
 ////////////////////////////////////////////////////////
 ////////////////////////////////////////////////////////
 
-
-
-cv::Point2f project(Eigen::Matrix3f K, Eigen::Vector3f p){
-        Eigen::Vector3f ph = K * p;
-        ph = ph / ph(2,0);
-        return cv::Point2f(ph(0), ph(1));
-}
-
 class ProcessorVOMultiView_class : public testing::Test{
 
     public:
-        KeyPointsMap mwkps_prev, mwkps_curr;
-        TracksMap tracks_prev_curr;
+        KeyPointsMap mwkps_c1, mwkps_c2;
+        TracksMap tracks_c1_c2;
         ProcessorVisualOdometry_WrapperPtr processor;
-        Eigen::Matrix3f K;
-
+        Eigen::Vector3d t_21;
+        Eigen::Matrix3d R_21;
+        Eigen::Vector4d k;
+        cv::Mat Kcv;
         void SetUp() override
         {
-
-            // Create a simple camera model
-            K << 100, 0, 240,
-                0, 100, 240,
-                0, 0, 1;
-            
+            k << 376, 240, 460, 460;
+            Kcv = (cv::Mat_<double>(3,3) << k(2), 0,    k(0),
+                                            0,    k(3), k(1),
+                                            0,    0,    1);
             // Create a processor
             ParamsProcessorVisualOdometryPtr params = std::make_shared<ParamsProcessorVisualOdometry>();
             params->grid.margin = 10;
@@ -374,52 +372,53 @@ class ProcessorVOMultiView_class : public testing::Test{
 
             // Install camera
             ParamsSensorCameraPtr intr = std::make_shared<ParamsSensorCamera>();
-            intr->pinhole_model_raw = Eigen::Vector4d(100, 100, 240, 240);
-            intr->width  = 480;
+            intr->pinhole_model_raw = k;  // Necessary so that the internal Kcv camera matrix is configured properly
+            intr->distortion = Eigen::Vector4d::Zero();
+            intr->width  = 752;
             intr->height = 480;
             SensorCameraPtr cam_ptr = std::make_shared<SensorCamera>((Eigen::Vector7d() << 0,0,0,  0,0,0,1).finished(), intr);
             processor->configure(cam_ptr);
 
-            // Set 3D points on the previous view
-            Eigen::Vector3f X1_prev(1.0, 1.0, 2.0);
-            Eigen::Vector3f X2_prev(-1.0, -1.0, 2.0);
-            Eigen::Vector3f X3_prev(-0.75, -0.75, 3.0);
-            Eigen::Vector3f X4_prev(-0.75, 0.75, 2.5);
-            Eigen::Vector3f X5_prev(0.75, -0.75, 2.0);
-            Eigen::Vector3f X6_prev(0.0, 1.0, 2.0);
-            Eigen::Vector3f X7_prev(0.1, -1.0, 3.0);
-            Eigen::Vector3f X8_prev(0.75, 0.75, 2.0);
+            // Set 3D points on the previous camera frame -> all in front of the camera is better
+            Eigen::Vector3d X1_c1(1.0, 1.0, 2.0);
+            Eigen::Vector3d X2_c1(-1.0, -1.0, 2.0);
+            Eigen::Vector3d X3_c1(-0.75, -0.75, 3.0);
+            Eigen::Vector3d X4_c1(-0.75, 0.75, 2.5);
+            Eigen::Vector3d X5_c1(0.75, -0.75, 2.0);
+            Eigen::Vector3d X6_c1(0.0, 1.0, 2.0);
+            Eigen::Vector3d X7_c1(0.1, -1.0, 3.0);
+            Eigen::Vector3d X8_c1(0.75, 0.75, 2.0);
 
             // Project pixels in the previous view
-            mwkps_prev.insert(std::pair<size_t, WKeyPoint>(0, WKeyPoint(cv::KeyPoint(project(K,X1_prev), 1))));
-            mwkps_prev.insert(std::pair<size_t, WKeyPoint>(1, WKeyPoint(cv::KeyPoint(project(K,X2_prev), 1))));
-            mwkps_prev.insert(std::pair<size_t, WKeyPoint>(2, WKeyPoint(cv::KeyPoint(project(K,X3_prev), 1))));
-            mwkps_prev.insert(std::pair<size_t, WKeyPoint>(3, WKeyPoint(cv::KeyPoint(project(K,X4_prev), 1))));
-            mwkps_prev.insert(std::pair<size_t, WKeyPoint>(4, WKeyPoint(cv::KeyPoint(project(K,X5_prev), 1))));
-            mwkps_prev.insert(std::pair<size_t, WKeyPoint>(5, WKeyPoint(cv::KeyPoint(project(K,X6_prev), 1))));
-            mwkps_prev.insert(std::pair<size_t, WKeyPoint>(6, WKeyPoint(cv::KeyPoint(project(K,X7_prev), 1))));
-            mwkps_prev.insert(std::pair<size_t, WKeyPoint>(7, WKeyPoint(cv::KeyPoint(project(K,X8_prev), 1))));
-
-            // Set the transformation between the two views
-            Eigen::Vector3f t(0.1, 0.1, 0.0);
-            // Eigen::Vector3f euler(0.0, 0.0, 0.0);  // degenerate case!
-            Eigen::Vector3f euler(0.2, 0.1, 0.3);
-            Eigen::Matrix3f R = e2R(euler);
-
-            // Project pixels in the current view
-            mwkps_curr.insert(std::pair<size_t, WKeyPoint>(0, WKeyPoint(cv::KeyPoint(project(K,R*X1_prev + t), 1))));
-            mwkps_curr.insert(std::pair<size_t, WKeyPoint>(1, WKeyPoint(cv::KeyPoint(project(K,R*X2_prev + t), 1))));
-            mwkps_curr.insert(std::pair<size_t, WKeyPoint>(2, WKeyPoint(cv::KeyPoint(project(K,R*X3_prev + t), 1))));
-            mwkps_curr.insert(std::pair<size_t, WKeyPoint>(3, WKeyPoint(cv::KeyPoint(project(K,R*X4_prev + t), 1))));
-            mwkps_curr.insert(std::pair<size_t, WKeyPoint>(4, WKeyPoint(cv::KeyPoint(project(K,R*X5_prev + t), 1))));
-            mwkps_curr.insert(std::pair<size_t, WKeyPoint>(5, WKeyPoint(cv::KeyPoint(project(K,R*X6_prev + t), 1))));
-            mwkps_curr.insert(std::pair<size_t, WKeyPoint>(6, WKeyPoint(cv::KeyPoint(project(K,R*X7_prev + t), 1))));
-            mwkps_curr.insert(std::pair<size_t, WKeyPoint>(7, WKeyPoint(cv::KeyPoint(project(K,R*X8_prev + t), 1))));  
+            mwkps_c1.insert(std::pair<size_t, WKeyPoint>(0, WKeyPoint(pinhole::projectPoint(k, intr->distortion, X1_c1))));
+            mwkps_c1.insert(std::pair<size_t, WKeyPoint>(1, WKeyPoint(pinhole::projectPoint(k, intr->distortion, X2_c1))));
+            mwkps_c1.insert(std::pair<size_t, WKeyPoint>(2, WKeyPoint(pinhole::projectPoint(k, intr->distortion, X3_c1))));
+            mwkps_c1.insert(std::pair<size_t, WKeyPoint>(3, WKeyPoint(pinhole::projectPoint(k, intr->distortion, X4_c1))));
+            mwkps_c1.insert(std::pair<size_t, WKeyPoint>(4, WKeyPoint(pinhole::projectPoint(k, intr->distortion, X5_c1))));
+            mwkps_c1.insert(std::pair<size_t, WKeyPoint>(5, WKeyPoint(pinhole::projectPoint(k, intr->distortion, X6_c1))));
+            mwkps_c1.insert(std::pair<size_t, WKeyPoint>(6, WKeyPoint(pinhole::projectPoint(k, intr->distortion, X7_c1))));
+            mwkps_c1.insert(std::pair<size_t, WKeyPoint>(7, WKeyPoint(pinhole::projectPoint(k, intr->distortion, X8_c1))));
+
+            // Set the transformation between views 1 and 2 
+            t_21 = Vector3d(0.1, 0.1, 0.0);
+            // Eigen::Vector3d euler(0.0, 0.0, 0.0);  // degenerate case!
+            Eigen::Vector3d euler(0.2, 0.1, 0.3);
+            R_21 = e2R(euler);
+
+            mwkps_c2.insert(std::pair<size_t, WKeyPoint>(0, WKeyPoint(pinhole::projectPoint(k, intr->distortion, R_21*X1_c1 + t_21))));
+            mwkps_c2.insert(std::pair<size_t, WKeyPoint>(1, WKeyPoint(pinhole::projectPoint(k, intr->distortion, R_21*X2_c1 + t_21))));
+            mwkps_c2.insert(std::pair<size_t, WKeyPoint>(2, WKeyPoint(pinhole::projectPoint(k, intr->distortion, R_21*X3_c1 + t_21))));
+            mwkps_c2.insert(std::pair<size_t, WKeyPoint>(3, WKeyPoint(pinhole::projectPoint(k, intr->distortion, R_21*X4_c1 + t_21))));
+            mwkps_c2.insert(std::pair<size_t, WKeyPoint>(4, WKeyPoint(pinhole::projectPoint(k, intr->distortion, R_21*X5_c1 + t_21))));
+            mwkps_c2.insert(std::pair<size_t, WKeyPoint>(5, WKeyPoint(pinhole::projectPoint(k, intr->distortion, R_21*X6_c1 + t_21))));
+            mwkps_c2.insert(std::pair<size_t, WKeyPoint>(6, WKeyPoint(pinhole::projectPoint(k, intr->distortion, R_21*X7_c1 + t_21))));
+            mwkps_c2.insert(std::pair<size_t, WKeyPoint>(7, WKeyPoint(pinhole::projectPoint(k, intr->distortion, R_21*X8_c1 + t_21))));
+
 
             // Build the tracksMap
-            for (size_t i=0; i<mwkps_curr.size(); ++i)
+            for (size_t i=0; i<mwkps_c2.size(); ++i)
             {
-                tracks_prev_curr.insert(std::pair<size_t, size_t>(i,i));
+                tracks_c1_c2.insert(std::pair<size_t, size_t>(i,i));
             }
         }
 };
@@ -430,23 +429,80 @@ TEST_F(ProcessorVOMultiView_class, filterWithEssential)
     cv::Mat E;
 
     // Let's try FilterWithEssential, all points here are inliers
-    processor->filterWithEssential(mwkps_prev, mwkps_curr, tracks_prev_curr, E);
-    ASSERT_EQ(tracks_prev_curr.size(), mwkps_curr.size());
+    processor->filterWithEssential(mwkps_c1, mwkps_c2, tracks_c1_c2, E);
+    ASSERT_EQ(tracks_c1_c2.size(), mwkps_c2.size());
 
+    ////////////////////////////////////////////////////////////////////
     // We had here an outlier: a keyPoint that doesn't move
-    mwkps_prev.insert(std::pair<size_t, WKeyPoint>(8, WKeyPoint(cv::KeyPoint(cv::Point2d(120,140), 1))));
-    mwkps_curr.insert(std::pair<size_t, WKeyPoint>(8, WKeyPoint(cv::KeyPoint(cv::Point2d(120,140), 1))));
-    tracks_prev_curr.insert(std::pair<size_t, size_t>(8,8)); 
+    mwkps_c1.insert(std::pair<size_t, WKeyPoint>(8, WKeyPoint(cv::KeyPoint(cv::Point2d(120,140), 1))));
+    mwkps_c2.insert(std::pair<size_t, WKeyPoint>(8, WKeyPoint(cv::KeyPoint(cv::Point2d(120,140), 1))));
+    tracks_c1_c2.insert(std::pair<size_t, size_t>(8,8)); 
 
     // point at 8 must be discarded
-    processor->filterWithEssential(mwkps_prev, mwkps_curr, tracks_prev_curr, E);
-    ASSERT_EQ(tracks_prev_curr.count(8), 0);
+    processor->filterWithEssential(mwkps_c1, mwkps_c2, tracks_c1_c2, E);
+    ASSERT_EQ(tracks_c1_c2.count(8), 0);
+
+}
+
+
+TEST_F(ProcessorVOMultiView_class, recoverPoseOpenCV)
+{
 
-    // do we retrieve the right orientation from the essential matrix?
+    // Check that the computed essential matrix corresponds to the camera movement
+    // -> recover the orientation and compare to groundtruth
+    cv::Mat E;
+
+    // Compute essential matrix, all points here are inliers
+    processor->filterWithEssential(mwkps_c1, mwkps_c2, tracks_c1_c2, E);
+
+    ////////////////////////////////////////////////////////////////////
+    // can we retrieve the right orientation from the essential matrix?
+    std::vector<cv::Point2f> pts_c1, pts_c2;
+    for (size_t i=0; i < mwkps_c1.size(); i++){
+        pts_c1.push_back(mwkps_c1.at(i).getCvKeyPoint().pt);
+        pts_c2.push_back(mwkps_c2.at(i).getCvKeyPoint().pt);
+    }
 
+    cv::Mat R_out, t_out;
+    cv::Mat mask;
+    cv::recoverPose(E, pts_c1, pts_c2, Kcv, R_out, t_out, mask);
 
+    Eigen::Matrix3d R_out_eig;
+    cv::cv2eigen(R_out, R_out_eig);
+    ASSERT_MATRIX_APPROX(R_21, R_out_eig, 1e-4);
 }
 
+
+// SEGFAULT!!!!!!!! (test disabled until the crash is fixed)
+// TEST_F(ProcessorVOMultiView_class, tryTriangulationFromFeatures)
+// {
+//     ProblemPtr problem = Problem::create("PO", 3);
+
+//     VectorComposite state1("PO");
+//     state1['P'] = Vector3d::Zero();
+//     state1['O'] = Quaterniond::Identity().coeffs();
+//     FrameBasePtr KF1 = FrameBase::emplace<FrameBase>(problem->getTrajectory(), 0.0, "PO", state1);
+//     CaptureBasePtr c1 = CaptureBase::emplace<CaptureImage>(KF1, 0.0, nullptr, cv::Mat());
+//     FeatureBasePtr f1 = FeatureBase::emplace<FeaturePointImage>(c1, mwkps_c1.at(0), Matrix2d::Identity());
+    
+//     VectorComposite state2("PO");
+//     state2['P'] = Vector3d::Zero();
+//     state2['O'] = Quaterniond::Identity().coeffs();
+//     FrameBasePtr KF2 = FrameBase::emplace<FrameBase>(problem->getTrajectory(), 1.0, "PO", state2);
+//     CaptureBasePtr c2 = CaptureBase::emplace<CaptureImage>(KF2, 1.0, nullptr, cv::Mat());
+//     FeatureBasePtr f2 = FeatureBase::emplace<FeaturePointImage>(c2, mwkps_c2.at(0), Matrix2d::Identity());
+
+//     Track track;
+//     track.insert(std::pair<TimeStamp, FeatureBasePtr>(0.0, f1));
+//     track.insert(std::pair<TimeStamp, FeatureBasePtr>(1.0, f2));
+    
+//     Vector4d pt_c;
+//     auto f2_pi = std::static_pointer_cast<FeaturePointImage>(f2);
+//     processor->tryTriangulationFromFeatures(f2_pi, track, pt_c);
+//     WOLF_INFO(pt_c);
+
+// }
+
 int main(int argc, char **argv)
 {
   testing::InitGoogleTest(&argc, argv);