diff --git a/CMakeLists.txt b/CMakeLists.txt
index bcf7d927cd571eb413e8068362c96b5efd6e583d..81a95ee8bec91fd4f546fd505b898692c305fcf4 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -328,7 +328,6 @@ SET(HDRS_FACTOR
   include/base/factor/factor_pose_3D.h
   include/base/factor/factor_quaternion_absolute.h
   include/base/factor/factor_relative_2D_analytic.h
-  include/base/factor/factor_autodiff_trifocal.h
   include/base/factor/factor_autodiff_distance_3D.h
   include/base/factor/factor_block_absolute.h
   include/base/factor/factor_container.h
@@ -373,7 +372,6 @@ SET(HDRS_FACTOR
   include/base/factor/factor_pose_3D.h
   include/base/factor/factor_quaternion_absolute.h
   include/base/factor/factor_relative_2D_analytic.h
-  include/base/factor/factor_autodiff_trifocal.h
   include/base/factor/factor_autodiff_distance_3D.h
   include/base/factor/factor_block_absolute.h
   include/base/factor/factor_container.h
@@ -705,7 +703,6 @@ IF (vision_utils_FOUND)
     include/base/feature/feature_point_image.h
     )
   SET(HDRS_PROCESSOR ${HDRS_PROCESSOR}
-    include/base/processor/processor_tracker_feature_trifocal.h
     include/base/processor/processor_params_image.h
     include/base/processor/processor_tracker_feature_image.h
     include/base/processor/processor_tracker_landmark_image.h
@@ -723,7 +720,6 @@ IF (vision_utils_FOUND)
     src/landmark/landmark_AHP.cpp
     )
   SET(SRCS_PROCESSOR ${SRCS_PROCESSOR}
-    src/processor/processor_tracker_feature_trifocal.cpp
     src/processor/processor_tracker_feature_image.cpp
     src/processor/processor_tracker_landmark_image.cpp
     )
@@ -764,7 +760,6 @@ IF(YAMLCPP_FOUND)
   IF(vision_utils_FOUND)
     SET(SRCS ${SRCS}
       src/yaml/processor_image_yaml.cpp
-      src/yaml/processor_tracker_feature_trifocal_yaml.cpp
       )
   ENDIF(vision_utils_FOUND)
 ENDIF(YAMLCPP_FOUND)
diff --git a/include/base/factor/factor_autodiff_trifocal.h b/include/base/factor/factor_autodiff_trifocal.h
deleted file mode 100644
index d7c2091fb2c547b1511d8f5985e66723a24fb247..0000000000000000000000000000000000000000
--- a/include/base/factor/factor_autodiff_trifocal.h
+++ /dev/null
@@ -1,397 +0,0 @@
-#ifndef _FACTOR_AUTODIFF_TRIFOCAL_H_
-#define _FACTOR_AUTODIFF_TRIFOCAL_H_
-
-//Wolf includes
-//#include "base/wolf.h"
-#include "base/factor/factor_autodiff.h"
-#include "base/sensor/sensor_camera.h"
-
-#include <common_class/trifocaltensor.h>
-#include <vision_utils.h>
-
-namespace wolf
-{
-
-WOLF_PTR_TYPEDEFS(FactorAutodiffTrifocal);
-
-using namespace Eigen;
-
-class FactorAutodiffTrifocal : public FactorAutodiff<FactorAutodiffTrifocal, 3, 3, 4, 3, 4, 3, 4, 3, 4>
-{
-    public:
-
-        /** \brief Class constructor
-         */
-        FactorAutodiffTrifocal(const FeatureBasePtr& _feature_prev_ptr,
-                               const FeatureBasePtr& _feature_origin_ptr,
-                               const FeatureBasePtr& _feature_last_ptr,
-                               const ProcessorBasePtr& _processor_ptr,
-                               bool _apply_loss_function,
-                               FactorStatus _status);
-
-        /** \brief Class Destructor
-         */
-        virtual ~FactorAutodiffTrifocal();
-
-        FeatureBasePtr getFeaturePrev();
-
-        const Vector3s& getPixelCanonicalLast() const
-        {
-            return pixel_canonical_last_;
-        }
-
-        void setPixelCanonicalLast(const Vector3s& pixelCanonicalLast)
-        {
-            pixel_canonical_last_ = pixelCanonicalLast;
-        }
-
-        const Vector3s& getPixelCanonicalOrigin() const
-        {
-            return pixel_canonical_origin_;
-        }
-
-        void setPixelCanonicalOrigin(const Vector3s& pixelCanonicalOrigin)
-        {
-            pixel_canonical_origin_ = pixelCanonicalOrigin;
-        }
-
-        const Vector3s& getPixelCanonicalPrev() const
-        {
-            return pixel_canonical_prev_;
-        }
-
-        void setPixelCanonicalPrev(const Vector3s& pixelCanonicalPrev)
-        {
-            pixel_canonical_prev_ = pixelCanonicalPrev;
-        }
-
-        const Matrix3s& getSqrtInformationUpper() const
-        {
-            return sqrt_information_upper;
-        }
-
-        /** \brief Compute the residual from the state blocks being iterated by the solver.
-         **/
-        template<typename T>
-        bool operator ()( const T* const _prev_pos,
-                          const T* const _prev_quat,
-                          const T* const _origin_pos,
-                          const T* const _origin_quat,
-                          const T* const _last_pos,
-                          const T* const _last_quat,
-                          const T* const _sen_pos,
-                          const T* const _sen_quat,
-                          T*             _residuals) const;
-
-    public:
-        template<typename D1, typename D2, class T, typename D3>
-        void expectation(const MatrixBase<D1>&     _wtr1,
-                         const QuaternionBase<D2>& _wqr1,
-                         const MatrixBase<D1>&     _wtr2,
-                         const QuaternionBase<D2>& _wqr2,
-                         const MatrixBase<D1>&     _wtr3,
-                         const QuaternionBase<D2>& _wqr3,
-                         const MatrixBase<D1>&     _rtc,
-                         const QuaternionBase<D2>& _rqc,
-                         vision_utils::TrifocalTensorBase<T>& _tensor,
-                         MatrixBase<D3>&           _c2Ec1) const;
-
-        template<typename T, typename D1>
-        Matrix<T, 3, 1> residual(const vision_utils::TrifocalTensorBase<T>& _tensor,
-                                 const MatrixBase<D1>& _c2Ec1) const;
-
-        // Helper functions to be used by the above
-        template<class T, typename D1, typename D2, typename D3, typename D4>
-        Matrix<T, 3, 1> error_jacobians(const vision_utils::TrifocalTensorBase<T>& _tensor,
-                                        const MatrixBase<D1>& _c2Ec1,
-                                        MatrixBase<D2>& _J_e_m1,
-                                        MatrixBase<D3>& _J_e_m2,
-                                        MatrixBase<D4>& _J_e_m3);
-
-    private:
-        FeatureBaseWPtr feature_prev_ptr_;  // To look for measurements
-        SensorCameraPtr camera_ptr_;        // To look for intrinsics
-        Vector3s pixel_canonical_prev_, pixel_canonical_origin_, pixel_canonical_last_;
-        Matrix3s sqrt_information_upper;
-};
-
-} // namespace wolf
-
-// Includes for implementation
-#include "base/rotations.h"
-
-namespace wolf
-{
-
-using namespace Eigen;
-
-// Constructor
-FactorAutodiffTrifocal::FactorAutodiffTrifocal(const FeatureBasePtr& _feature_prev_ptr,
-                                               const FeatureBasePtr& _feature_origin_ptr,
-                                               const FeatureBasePtr& _feature_last_ptr,
-                                               const ProcessorBasePtr& _processor_ptr,
-                                               bool _apply_loss_function,
-                                               FactorStatus _status) :
-        FactorAutodiff( "TRIFOCAL PLP",
-                        nullptr,
-                        nullptr,
-                        _feature_origin_ptr,
-                        nullptr,
-                        _processor_ptr,
-                        _apply_loss_function,
-                        _status,
-                        _feature_prev_ptr->getFrame()->getP(),
-                        _feature_prev_ptr->getFrame()->getO(),
-                        _feature_origin_ptr->getFrame()->getP(),
-                        _feature_origin_ptr->getFrame()->getO(),
-                        _feature_last_ptr->getFrame()->getP(),
-                        _feature_last_ptr->getFrame()->getO(),
-                        _feature_last_ptr->getCapture()->getSensorP(),
-                        _feature_last_ptr->getCapture()->getSensorO() ),
-        feature_prev_ptr_(_feature_prev_ptr),
-        camera_ptr_(std::static_pointer_cast<SensorCamera>(_processor_ptr->getSensor())),
-        sqrt_information_upper(Matrix3s::Zero())
-{
-    setFeature(_feature_last_ptr);
-    Matrix3s K_inv           = camera_ptr_->getIntrinsicMatrix().inverse();
-    pixel_canonical_prev_    = K_inv * Vector3s(_feature_prev_ptr  ->getMeasurement(0), _feature_prev_ptr  ->getMeasurement(1), 1.0);
-    pixel_canonical_origin_  = K_inv * Vector3s(_feature_origin_ptr->getMeasurement(0), _feature_origin_ptr->getMeasurement(1), 1.0);
-    pixel_canonical_last_    = K_inv * Vector3s(_feature_last_ptr  ->getMeasurement(0), _feature_last_ptr  ->getMeasurement(1), 1.0);
-    Matrix<Scalar,3,2> J_m_u = K_inv.block(0,0,3,2);
-
-    // extract relevant states
-    Vector3s    wtr1 =             _feature_prev_ptr  ->getFrame()  ->getP()      ->getState();
-    Quaternions wqr1 = Quaternions(_feature_prev_ptr  ->getFrame()  ->getO()      ->getState().data() );
-    Vector3s    wtr2 =             _feature_origin_ptr->getFrame()  ->getP()      ->getState();
-    Quaternions wqr2 = Quaternions(_feature_origin_ptr->getFrame()  ->getO()      ->getState().data() );
-    Vector3s    wtr3 =             _feature_last_ptr  ->getFrame()  ->getP()      ->getState();
-    Quaternions wqr3 = Quaternions(_feature_last_ptr  ->getFrame()  ->getO()      ->getState().data() );
-    Vector3s    rtc  =             _feature_last_ptr  ->getCapture()->getSensorP()->getState();
-    Quaternions rqc  = Quaternions(_feature_last_ptr  ->getCapture()->getSensorO()->getState().data() );
-
-    // expectation // canonical units
-    vision_utils::TrifocalTensorBase<Scalar> tensor;
-    Matrix3s    c2Ec1;
-    expectation(wtr1, wqr1,
-                wtr2, wqr2,
-                wtr3, wqr3,
-                rtc, rqc,
-                tensor, c2Ec1);
-
-    // error Jacobians // canonical units
-    Matrix<Scalar,3,3> J_e_m1, J_e_m2, J_e_m3;
-    error_jacobians(tensor, c2Ec1, J_e_m1, J_e_m2, J_e_m3);
-
-    // chain rule to go from homogeneous pixel to Euclidean pixel
-    Matrix<Scalar,3,2> J_e_u1 = J_e_m1 * J_m_u;
-    Matrix<Scalar,3,2> J_e_u2 = J_e_m2 * J_m_u;
-    Matrix<Scalar,3,2> J_e_u3 = J_e_m3 * J_m_u;
-
-    // Error covariances induced by each of the measurement covariance // canonical units
-    Matrix3s Q1 = J_e_u1 * getFeaturePrev() ->getMeasurementCovariance() * J_e_u1.transpose();
-    Matrix3s Q2 = J_e_u2 * getFeatureOther()->getMeasurementCovariance() * J_e_u2.transpose();
-    Matrix3s Q3 = J_e_u3 * getFeature()     ->getMeasurementCovariance() * J_e_u3.transpose();
-
-    // Total error covariance // canonical units
-    Matrix3s Q = Q1 + Q2 + Q3;
-
-    // Sqrt of information matrix // canonical units
-    Eigen::LLT<Eigen::MatrixXs> llt_of_info(Q.inverse()); // Cholesky decomposition
-    sqrt_information_upper = llt_of_info.matrixU();
-
-    // Re-write info matrix (for debug only)
-    //    Scalar pix_noise = 1.0;
-    //    sqrt_information_upper = pow(1.0/pix_noise, 2) * Matrix3s::Identity(); // one PLP (2D) and one epipolar (1D) factor
-}
-
-// Destructor
-FactorAutodiffTrifocal::~FactorAutodiffTrifocal()
-{
-}
-
-inline FeatureBasePtr FactorAutodiffTrifocal::getFeaturePrev()
-{
-    return feature_prev_ptr_.lock();
-}
-
-template<typename T>
-bool FactorAutodiffTrifocal::operator ()( const T* const _prev_pos,
-                                          const T* const _prev_quat,
-                                          const T* const _origin_pos,
-                                          const T* const _origin_quat,
-                                          const T* const _last_pos,
-                                          const T* const _last_quat,
-                                          const T* const _sen_pos,
-                                          const T* const _sen_quat,
-                                          T* _residuals) const
-{
-
-    // MAPS
-    Map<const Matrix<T,3,1> > wtr1(_prev_pos);
-    Map<const Quaternion<T> > wqr1(_prev_quat);
-    Map<const Matrix<T,3,1> > wtr2(_origin_pos);
-    Map<const Quaternion<T> > wqr2(_origin_quat);
-    Map<const Matrix<T,3,1> > wtr3(_last_pos);
-    Map<const Quaternion<T> > wqr3(_last_quat);
-    Map<const Matrix<T,3,1> > rtc (_sen_pos);
-    Map<const Quaternion<T> > rqc (_sen_quat);
-    Map<Matrix<T,3,1> >       res (_residuals);
-
-    vision_utils::TrifocalTensorBase<T> tensor;
-    Matrix<T, 3, 3> c2Ec1;
-    expectation(wtr1, wqr1, wtr2, wqr2, wtr3, wqr3, rtc, rqc, tensor, c2Ec1);
-    res = residual(tensor, c2Ec1);
-    return true;
-}
-
-template<typename D1, typename D2, class T, typename D3>
-inline void FactorAutodiffTrifocal::expectation(const MatrixBase<D1>&     _wtr1,
-                                                const QuaternionBase<D2>& _wqr1,
-                                                const MatrixBase<D1>&     _wtr2,
-                                                const QuaternionBase<D2>& _wqr2,
-                                                const MatrixBase<D1>&     _wtr3,
-                                                const QuaternionBase<D2>& _wqr3,
-                                                const MatrixBase<D1>&     _rtc,
-                                                const QuaternionBase<D2>& _rqc,
-                                                vision_utils::TrifocalTensorBase<T>& _tensor,
-                                                MatrixBase<D3>&     _c2Ec1) const
-{
-
-        typedef Translation<T, 3> TranslationType;
-        typedef Eigen::Transform<T, 3, Eigen::Affine> TransformType;
-
-        // All input Transforms
-        TransformType wHr1 = TranslationType(_wtr1) * _wqr1;
-        TransformType wHr2 = TranslationType(_wtr2) * _wqr2;
-        TransformType wHr3 = TranslationType(_wtr3) * _wqr3;
-        TransformType rHc  = TranslationType(_rtc)  * _rqc ;
-
-        // Relative camera transforms
-        TransformType c1Hc2 = rHc.inverse() * wHr1.inverse() * wHr2 * rHc;
-        TransformType c1Hc3 = rHc.inverse() * wHr1.inverse() * wHr3 * rHc;
-
-        // Projection matrices
-        Matrix<T,3,4> c2Pc1 = c1Hc2.inverse().affine();
-        Matrix<T,3,4> c3Pc1 = c1Hc3.inverse().affine();
-
-        // Trifocal tensor
-        _tensor.computeTensorFromProjectionMat(c2Pc1, c3Pc1);
-
-        /* Essential matrix convention disambiguation
-         *
-         * C1 is the origin frame or reference
-         * C2 is another cam
-         * C2 is specified by R and T wrt C1 so that
-         *   T is the position    of C2 wrt C1
-         *   R is the orientation of C2 wrt C1
-         * There is a 3D point P, noted P1 when expressed in C1 and P2 when expressed in C2:
-         *   P1 = T + R * P2
-         *
-         * Coplanarity condition: a' * (b x c) = 0 with {a,b,c} three coplanar vectors.
-         *
-         * The three vectors are:
-         *
-         *   baseline: b  = T
-         *   ray 1   : r1 = P1
-         *   ray 2   : r2 = P1 - T = R*P2;
-         *
-         * so,
-         *
-         *   (r1)' * (b x r2) = 0 , which develops as:
-         *
-         *   P1' * (T x (R*P2))  = 0
-         *   P1' * [T]x * R * P2 = 0
-         *   P1' * c1Ec2 * P2    = 0 <--- Epipolar factor
-         *
-         * therefore:
-         *   c1Ec2 = [T]x * R        <--- Essential matrix
-         *
-         * or, if we prefer the factor P2' * c2Ec1 * P1 = 0,
-         *   c2Ec1 = c1Ec2' = R' * [T]x (we disregard the sign change)
-         */
-        Matrix<T,3,3> Rtr = c1Hc2.matrix().block(0,0,3,3).transpose();
-        Matrix<T,3,1> t   = c1Hc2.matrix().block(0,3,3,1);
-        _c2Ec1 = Rtr * skew(t);
-//        _c2Ec1 =  c1Hc2.rotation().transpose() * skew(c1Hc2.translation()) ;
-}
-
-template<typename T, typename D1>
-inline Matrix<T, 3, 1> FactorAutodiffTrifocal::residual(const vision_utils::TrifocalTensorBase<T>& _tensor,
-                                                        const MatrixBase<D1>& _c2Ec1) const
-{
-    // 1. COMMON COMPUTATIONS
-
-    // m1, m2, m3: canonical pixels in cams 1,2,3 -- canonical means m = K.inv * u, with _u_ a homogeneous pixel [ux; uy; 1].
-    Matrix<T,3,1> m1(pixel_canonical_prev_  .cast<T>());
-    Matrix<T,3,1> m2(pixel_canonical_origin_.cast<T>());
-    Matrix<T,3,1> m3(pixel_canonical_last_  .cast<T>());
-
-    // 2. TRILINEARITY PLP
-
-    Matrix<T,2,1> e1;
-    vision_utils::errorTrifocalPLP(m1, m2, m3, _tensor, _c2Ec1, e1);
-
-    // 3. EPIPOLAR
-    T e2;
-    vision_utils::errorEpipolar(m1, m2, _c2Ec1, e2);
-
-    // 4. RESIDUAL
-
-    Matrix<T,3,1> errors, residual;
-    errors  << e1, e2;
-    residual = sqrt_information_upper.cast<T>() * errors;
-
-    return residual;
-}
-
-// Helper functions to be used by the above
-template<class T, typename D1, typename D2, typename D3, typename D4>
-inline Matrix<T, 3, 1> FactorAutodiffTrifocal::error_jacobians(const vision_utils::TrifocalTensorBase<T>& _tensor,
-                                                               const MatrixBase<D1>& _c2Ec1,
-                                                               MatrixBase<D2>& _J_e_m1,
-                                                               MatrixBase<D3>& _J_e_m2,
-                                                               MatrixBase<D4>& _J_e_m3)
-{
-    // 1. COMMON COMPUTATIONS
-
-    // m1, m2, m3: canonical pixels in cams 1,2,3 -- canonical means m = K.inv * u, with _u_ a homogeneous pixel [ux; uy; 1].
-    Matrix<T,3,1> m1(pixel_canonical_prev_.cast<T>());
-    Matrix<T,3,1> m2(pixel_canonical_origin_.cast<T>());
-    Matrix<T,3,1> m3(pixel_canonical_last_.cast<T>());
-
-    // 2. TRILINEARITY PLP
-
-    Matrix<T,2,3> J_e1_m1, J_e1_m2, J_e1_m3;
-    Matrix<T,2,1> e1;
-
-    vision_utils::errorTrifocalPLP(m1, m2, m3, _tensor, _c2Ec1, e1, J_e1_m1, J_e1_m2, J_e1_m3);
-
-    // 3. EPIPOLAR
-
-    T e2;
-    Matrix<T,1,3> J_e2_m1, J_e2_m2, J_e2_m3;
-
-    vision_utils::errorEpipolar(m1, m2, _c2Ec1, e2, J_e2_m1, J_e2_m2);
-
-    J_e2_m3.setZero(); // Not involved in epipolar c1->c2
-
-    // Compact Jacobians
-    _J_e_m1.topRows(2) = J_e1_m1;
-    _J_e_m1.row(2)     = J_e2_m1;
-    _J_e_m2.topRows(2) = J_e1_m2;
-    _J_e_m2.row(2)     = J_e2_m2;
-    _J_e_m3.topRows(2) = J_e1_m3;
-    _J_e_m3.row(2)     = J_e2_m3;
-
-    // 4. ERRORS
-
-    Matrix<T,3,1> errors;
-    errors  << e1, e2;
-
-    return errors;
-
-}
-
-}    // namespace wolf
-
-#endif /* _FACTOR_AUTODIFF_TRIFOCAL_H_ */
diff --git a/include/base/processor/processor_tracker_feature_trifocal.h b/include/base/processor/processor_tracker_feature_trifocal.h
deleted file mode 100644
index 03758dbdaecaa24414abfcbf6df272783029f63e..0000000000000000000000000000000000000000
--- a/include/base/processor/processor_tracker_feature_trifocal.h
+++ /dev/null
@@ -1,179 +0,0 @@
-#ifndef _PROCESSOR_TRACKER_FEATURE_TRIFOCAL_H_
-#define _PROCESSOR_TRACKER_FEATURE_TRIFOCAL_H_
-
-//Wolf includes
-#include "base/processor/processor_tracker_feature.h"
-#include "base/capture/capture_image.h"
-
-// Vision utils
-#include <vision_utils.h>
-#include <detectors/detector_base.h>
-#include <descriptors/descriptor_base.h>
-#include <matchers/matcher_base.h>
-#include <algorithms/activesearch/alg_activesearch.h>
-
-namespace wolf
-{
-
-WOLF_STRUCT_PTR_TYPEDEFS(ProcessorParamsTrackerFeatureTrifocal);
-
-struct ProcessorParamsTrackerFeatureTrifocal : public ProcessorParamsTrackerFeature
-{
-        std::string yaml_file_params_vision_utils;
-
-        int n_cells_h;
-        int n_cells_v;
-        int min_response_new_feature;
-        Scalar pixel_noise_std; ///< std noise of the pixel
-        int min_track_length_for_factor; ///< Minimum track length of a matched feature to create a factor
-};
-
-WOLF_PTR_TYPEDEFS(ProcessorTrackerFeatureTrifocal);
-
-class ProcessorTrackerFeatureTrifocal : public ProcessorTrackerFeature
-{
-        // Parameters for vision_utils
-    protected:
-        vision_utils::DetectorBasePtr   det_ptr_;
-        vision_utils::DescriptorBasePtr des_ptr_;
-        vision_utils::MatcherBasePtr    mat_ptr_;
-
-    protected:
-
-        ProcessorParamsTrackerFeatureTrifocalPtr params_tracker_feature_trifocal_;      ///< Configuration parameters
-
-        CaptureImagePtr capture_image_last_;
-        CaptureImagePtr capture_image_incoming_;
-        Matrix2s        pixel_cov_;
-
-    private:
-        CaptureBasePtr prev_origin_ptr_;                    ///< Capture previous to origin_ptr_ for the third focus of the trifocal.
-        bool initialized_;                                  ///< Flags the situation where three foci are available: prev_origin, origin, and last.
-
-    public:
-
-        /** \brief Class constructor
-         */
-        ProcessorTrackerFeatureTrifocal( ProcessorParamsTrackerFeatureTrifocalPtr _params_tracker_feature_trifocal );
-
-        /** \brief Class Destructor
-         */
-        virtual ~ProcessorTrackerFeatureTrifocal();
-        virtual void configure(SensorBasePtr _sensor) override;
-
-        /** \brief Track provided features from \b last to \b incoming
-         * \param _features_last_in input list of features in \b last to track
-         * \param _features_incoming_out returned list of features found in \b incoming
-         * \param _feature_matches returned map of correspondences: _feature_matches[feature_out_ptr] = feature_in_ptr
-         */
-        virtual unsigned int trackFeatures(const FeatureBasePtrList& _features_last_in, FeatureBasePtrList& _features_incoming_out, FeatureMatchMap& _feature_matches) override;
-
-        /** \brief Correct the drift in incoming feature by re-comparing against the corresponding feature in origin.
-         * \param _origin_feature input feature in origin capture tracked
-         * \param _incoming_feature input/output feature in incoming capture to be corrected
-         * \return false if the process discards the correspondence with origin's feature
-         */
-        virtual bool correctFeatureDrift(const FeatureBasePtr _origin_feature, const FeatureBasePtr _last_feature, FeatureBasePtr _incoming_feature) override;
-
-        /** \brief Vote for KeyFrame generation
-         *
-         * If a KeyFrame criterion is validated, this function returns true,
-         * meaning that it wants to create a KeyFrame at the \b last Capture.
-         *
-         * WARNING! This function only votes! It does not create KeyFrames!
-         */
-        virtual bool voteForKeyFrame() override;
-
-        /** \brief Detect new Features
-         * \param _max_new_features maximum number of features detected (-1: unlimited. 0: none)
-         * \param _features_last_out The list of detected Features.
-         * \return The number of detected Features.
-         *
-         * This function detects Features that do not correspond to known Features/Landmarks in the system.
-         *
-         * The function is called in ProcessorTrackerFeature::processNew() to set the member new_features_last_,
-         * the list of newly detected features of the capture last_ptr_.
-         */
-        virtual unsigned int detectNewFeatures(const int& _max_new_features, FeatureBasePtrList& _features_last_out) override;
-
-        /** \brief Create a new factor and link it to the wolf tree
-         * \param _feature_ptr pointer to the parent Feature
-         * \param _feature_other_ptr pointer to the other feature constrained.
-         *
-         * Implement this method in derived classes.
-         *
-         * This function creates a factor of the appropriate type for the derived processor.
-         */
-        virtual FactorBasePtr createFactor(FeatureBasePtr _feature_ptr, FeatureBasePtr _feature_other_ptr) override;
-
-        //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-
-        /** \brief advance pointers
-         *
-         */
-        virtual void advanceDerived() override;
-
-        /** \brief reset pointers and match lists at KF creation
-         *
-         */
-        virtual void resetDerived() override;
-
-        /** \brief Pre-process: check if all captures (prev-origin, origin, last) are initialized to allow factor creation
-         *
-         */
-        virtual void preProcess() override;
-
-        /** \brief Post-process
-         *
-         */
-        virtual void postProcess() override;
-
-        /** \brief Establish factors between features in Captures \b last and \b origin
-         */
-        virtual void establishFactors() override;
-
-        CaptureBasePtr getPrevOrigin();
-
-        bool isInlier(const cv::KeyPoint& _kp_incoming, const cv::KeyPoint& _kp_last);
-
-        void setParams(const ProcessorParamsTrackerFeatureTrifocalPtr _params);
-
-    public:
-
-        /// @brief Factory method
-        static ProcessorBasePtr create(const std::string& _unique_name,
-                                       const ProcessorParamsBasePtr _params,
-                                       const SensorBasePtr _sensor_ptr);
-    private:
-
-        cv::Mat image_debug_;
-
-    public:
-
-        /**
-         * \brief Return Image for debug purposes
-         */
-        cv::Mat getImageDebug();
-
-        /// \brief Get pixel covariance
-        const Matrix2s& pixelCov() const;
-};
-
-inline CaptureBasePtr ProcessorTrackerFeatureTrifocal::getPrevOrigin()
-{
-    return prev_origin_ptr_;
-}
-
-inline cv::Mat ProcessorTrackerFeatureTrifocal::getImageDebug()
-{
-    return image_debug_;
-}
-
-inline const Eigen::Matrix2s& ProcessorTrackerFeatureTrifocal::pixelCov() const
-{
-    return pixel_cov_;
-}
-
-} // namespace wolf
-
-#endif /* _PROCESSOR_TRACKER_FEATURE_TRIFOCAL_H_ */
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 94154c73e17e8dbcaf13f2a372399bc033613523..20114da7f66f44b53dda372d726a78100a0f536b 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -307,7 +307,6 @@ capture/capture_velocity.h
 capture/capture_wheel_joint_position.h
     )
   SET(HDRS_CONSTRAINT
-factor/factor_autodiff_trifocal.h
 factor/factor_autodiff_distance_3D.h
 factor/factor_AHP.h
 factor/factor_block_absolute.h
@@ -625,9 +624,6 @@ processor/processor_params_image.h
 processor/processor_tracker_feature_image.h
 processor/processor_tracker_landmark_image.h
         )
-    SET(HDRS_PROCESSOR ${HDRS_PROCESSOR}
-processor/processor_tracker_feature_trifocal.h
-    )
   SET(HDRS_LANDMARK ${HDRS_LANDMARK}
 landmark/landmark_point_3D.h
     )
@@ -641,9 +637,6 @@ processor/processor_tracker_landmark_image.cpp
     SET(SRCS_LANDMARK ${SRCS_LANDMARK}
 landmark/landmark_point_3D.cpp
     )
-    SET(SRCS_PROCESSOR ${SRCS_PROCESSOR}
-processor/processor_tracker_feature_trifocal.cpp
-    )
 ENDIF(vision_utils_FOUND)
 
 # Add the capture sub-directory
@@ -698,7 +691,6 @@ IF(YAMLCPP_FOUND)
     IF(vision_utils_FOUND)
         SET(SRCS ${SRCS}
         	yaml/processor_image_yaml.cpp
-        	yaml/processor_tracker_feature_trifocal_yaml.cpp
             )
     ENDIF(vision_utils_FOUND)
 ENDIF(YAMLCPP_FOUND)
diff --git a/src/examples/CMakeLists.txt b/src/examples/CMakeLists.txt
index fb640a17bae87788cb7e85940b094ce8d3a76f93..836caa3d1c337a37eb8680871139251a371353a2 100644
--- a/src/examples/CMakeLists.txt
+++ b/src/examples/CMakeLists.txt
@@ -133,10 +133,6 @@ IF(vision_utils_FOUND)
     # ORB tracker test
     ADD_EXECUTABLE(test_tracker_ORB test_tracker_ORB.cpp)
     TARGET_LINK_LIBRARIES(test_tracker_ORB ${PROJECT_NAME})
-    
-    # Trifocal optimization test
-    ADD_EXECUTABLE(test_trifocal_optimization test_trifocal_optimization.cpp)
-    TARGET_LINK_LIBRARIES(test_trifocal_optimization ${PROJECT_NAME})
 
 ENDIF(vision_utils_FOUND)
 
diff --git a/src/examples/processor_tracker_feature_trifocal.yaml b/src/examples/processor_tracker_feature_trifocal.yaml
deleted file mode 100644
index dfe322d2140abec834c0b9cc2316fd276c125651..0000000000000000000000000000000000000000
--- a/src/examples/processor_tracker_feature_trifocal.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-processor type: "TRACKER FEATURE TRIFOCAL"
-processor name: "tracker feature trifocal example"
-
-vision_utils:
-    YAML file params: processor_tracker_feature_trifocal_vision_utils.yaml
-
-algorithm:
-    time tolerance: 0.005
-    voting active: false
-    minimum features for keyframe: 40
-    maximum new features: 100
-    grid horiz cells: 13
-    grid vert cells:  10
-    min response new features: 50   
-    min track length for constraint: 3
-    
-noise:
-    pixel noise std: 1 # pixels
-    
diff --git a/src/examples/processor_tracker_feature_trifocal_vision_utils.yaml b/src/examples/processor_tracker_feature_trifocal_vision_utils.yaml
deleted file mode 100644
index 7621cd8ad1e516361241b88c9de390bd3ac2abb0..0000000000000000000000000000000000000000
--- a/src/examples/processor_tracker_feature_trifocal_vision_utils.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-sensor:
-  type: "USB_CAM"
-
-detector:
-  type: "GFTT"
-  maxCorners: 1000
-  qualityLevel: 0.01
-  minDistance: 1.0
-  blockSize: 3
-  k: 0.04
-
-descriptor:
-  type: "ORB"         
-  nfeatures: 1000
-  scale factor: 1.2
-  nlevels: 1
-  edge threshold: 8   # 16
-  first level: 0 
-  WTA_K: 2            # See: http://docs.opencv.org/trunk/db/d95/classcv_1_1ORB.html#a180ae17d3300cf2c619aa240d9b607e5
-  score type: 1       #enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 };
-  patch size: 15      # 31
-  
-matcher:
-  type: "BRUTEFORCE_HAMMING" # BRUTEFORCE, BRUTEFORCE_HAMMING, BRUTEFORCE_HAMMING_2, BRUTEFORCE_L1, FLANNBASED
-  match type: 2    #  Match type. MATCH = 1, KNNMATCH = 2, RADIUSMATCH = 3
-  min normalized score: 0.9
-  filtering:
-    ransac epipolar distance: 1  # Distance to epipolar [pixels]
-    ransac confidence prob: 0.98 # Confidence probability
-    max match euclidean dist: 50 # Max euclidean distance to consider a match as inlier    
-    
\ No newline at end of file
diff --git a/src/examples/test_trifocal_optimization.cpp b/src/examples/test_trifocal_optimization.cpp
deleted file mode 100644
index 4e6ae7113cca24c723ddbd791db1426625950fef..0000000000000000000000000000000000000000
--- a/src/examples/test_trifocal_optimization.cpp
+++ /dev/null
@@ -1,256 +0,0 @@
-// Testing trifocal factor optimization over a sequence of images with ground-truth pose priors
-
-//C includes for sleep, time and main args
-#include "unistd.h"
-
-//std includes
-#include <iostream>
-
-// Vision utils
-#include <vision_utils.h>
-
-//Wolf includes
-#include "base/processor/processor_tracker_feature_trifocal.h"
-#include "base/capture/capture_image.h"
-#include "base/sensor/sensor_camera.h"
-#include "base/ceres_wrapper/ceres_manager.h"
-#include "base/rotations.h"
-#include "base/capture/capture_pose.h"
-#include "base/capture/capture_void.h"
-#include "base/factor/factor_autodiff_distance_3D.h"
-
-Eigen::VectorXs get_random_state(const double& _LO, const double& _HI)
-{
-    double range= _HI-_LO;
-    Eigen::VectorXs x = Eigen::VectorXs::Random(7); // Vector filled with random numbers between (-1,1)
-    x = (x + Eigen::VectorXs::Constant(7,1.0))*range/2.; // add 1 to the vector to have values between 0 and 2; multiply with range/2
-    x = (x + Eigen::VectorXs::Constant(7,_LO)); //set LO as the lower bound (offset)
-    x.segment(3,4).normalize(); // Normalize quaternion part
-    return x;
-}
-
-int main(int argc, char** argv)
-{
-    using namespace wolf;
-    using std::shared_ptr;
-    using std::make_shared;
-    using std::static_pointer_cast;
-    using Eigen::Vector2s;
-
-    std::string wolf_root = _WOLF_ROOT_DIR;
-
-    // ===============================================
-    // TEST IMAGES ===================================
-    // ===============================================
-
-    // x,y displacement, negative values are directly added in the path string (row-wise).
-    Eigen::MatrixXs img_pos = Eigen::MatrixXs::Zero(10,2);
-    img_pos.row(0) <<  0, 0;
-    img_pos.row(1) << -1, 0;
-    img_pos.row(2) << -2, 0;
-    img_pos.row(3) << -2,-1;
-    img_pos.row(4) << -2,-2;
-    img_pos.row(5) << -1,-2;
-    img_pos.row(6) <<  0,-2;
-    img_pos.row(7) <<  0,-1;
-    img_pos.row(8) <<  0, 0;
-    img_pos.row(9) << -1, 0;
-
-    // read image
-    std::cout << std::endl << "-> Reading images from ground-truth movements..." << std::endl;
-    std::vector<cv::Mat> images;
-    for (unsigned int ii = 0; ii < img_pos.rows(); ++ii)
-    {
-        std::string img_path = wolf_root + "/src/examples/Test_gazebo_x" + std::to_string((int)img_pos(ii,0)) + "0cm_y" + std::to_string((int)img_pos(ii,1)) + "0cm.jpg";
-        std::cout << " |->" << img_path << std::endl;
-        images.push_back(cv::imread(img_path, CV_LOAD_IMAGE_GRAYSCALE));   // Read the file
-        if(! images.at(ii).data )                              // Check for invalid input
-        {
-            std::cout <<  " X--Could not open or find the image: " << img_path << std::endl ;
-            return -1;
-        }
-    }
-    std::cout << std::endl;
-
-
-    // Scale ground truth priors
-    img_pos *= 0.10;
-
-    cv::imshow( "DEBUG VIEW", images.at(0) );  // Show our image inside it.
-    cv::waitKey(1);                            // Wait for a keystroke in the window
-
-    // ===============================================
-    // CONFIG WOLF ===================================
-    // ===============================================
-
-    // Wolf problem
-    ProblemPtr problem = Problem::create("PO 3D");
-
-    // CERES WRAPPER
-    CeresManagerPtr ceres_manager;
-    ceres::Solver::Options ceres_options;
-    ceres_options.max_num_iterations = 50;
-    ceres_options.function_tolerance = 1e-6;
-    ceres_manager = make_shared<CeresManager>(problem, ceres_options);
-
-    // Install tracker (sensor and processor)
-    Eigen::Vector7s cam_ext; cam_ext << 0.0,0.0,0.0, 0.0,0.0,0.0,1.0;
-    std::string cam_intr_yaml = wolf_root + "/src/examples/camera_params_1280x960_ideal.yaml";
-    SensorBasePtr sensor = problem->installSensor("CAMERA","camera",cam_ext,cam_intr_yaml);
-    SensorCameraPtr camera = std::static_pointer_cast<SensorCamera>(sensor);
-
-    std::string proc_trifocal_params_yaml = wolf_root + "/src/examples/processor_tracker_feature_trifocal.yaml";
-    ProcessorBasePtr processor = problem->installProcessor("TRACKER FEATURE TRIFOCAL","trifocal","camera", proc_trifocal_params_yaml);
-    ProcessorTrackerFeatureTrifocalPtr tracker = std::static_pointer_cast<ProcessorTrackerFeatureTrifocal>(processor);
-
-    // ===============================================
-    // KF1 (PRIOR) ===================================
-    // ===============================================
-
-    // Set problem PRIOR
-    Scalar dt = 0.01;
-    TimeStamp   t(0.0);
-    Vector7s    x; x <<  img_pos.row(0).transpose(), 0.0,   0.0, 0.0, 0.0, 1.0;
-    x.segment(3,4).normalize();
-    Matrix6s    P = Matrix6s::Identity() * 0.000001; // 1mm
-
-    // ====== KF1 ======
-    FrameBasePtr kf1 = problem->setPrior(x, P, t, dt/2);
-
-    // Process capture
-    CaptureImagePtr capture_1 = make_shared<CaptureImage>(t, camera, images.at(0));
-    camera->process(capture_1);
-
-    // Verify KFs
-    FrameBasePtr kf1_check = capture_1->getFrame();
-    assert(kf1->id()==kf1_check->id() && "Prior and KF1 are not the same!");
-
-//    problem->print(2,0,1,0);
-
-
-    // ===============================================
-    // Other KFs =====================================
-    // ===============================================
-    int kf_total_num = img_pos.rows();
-    std::vector<FrameBasePtr> kfs;
-    kfs.push_back(kf1);
-    std::vector<CaptureImagePtr> captures;
-    captures.push_back(capture_1);
-
-    for (int kf_id = 2; kf_id <= kf_total_num; ++kf_id)
-    {
-        t += dt; // increment t
-
-        if ( (kf_id % 2 == 1) )
-        {
-            x << img_pos.row(kf_id-1).transpose(),0.0,    0.0,0.0,0.0,1.0; // ground truth position
-            FrameBasePtr kf = problem->emplaceFrame(KEY_FRAME,x,t);
-            std::cout << "KeyFrm " << kf->id() << " TS: " << kf->getTimeStamp() << std::endl;
-            kfs.push_back(kf);
-            problem->keyFrameCallback(kf,nullptr,dt/2.0); // Ack KF creation
-        }
-
-        CaptureImagePtr capture = make_shared<CaptureImage>(t, camera, images.at(kf_id-1));
-        captures.push_back(capture);
-        std::cout << "Capture " << kf_id << " TS: " << capture->getTimeStamp() << std::endl;
-        camera->process(capture);
-
-        cv::waitKey(1); // Wait for a keystroke in the window
-    }
-
-    // ==================================================
-    // Establish Scale Factor (to see results scaled)
-    // ==================================================
-
-    std::cout << "================== ADD Scale factor ========================" << std::endl;
-
-    // Distance factor
-    Vector1s distance(0.2);      // 2x10cm distance -- this fixes the scale
-    Matrix1s dist_cov(0.000001); // 1mm error
-
-    CaptureBasePtr
-    cap_dist  = problem->closestKeyFrameToTimeStamp(TimeStamp(2*dt))->addCapture(make_shared<CaptureVoid>(t,
-                                                                                                          sensor));
-    FeatureBasePtr
-    ftr_dist = cap_dist->addFeature(make_shared<FeatureBase>("DISTANCE",
-                                                              distance,
-                                                              dist_cov));
-    FactorBasePtr
-    fac_dist  = ftr_dist->addFactor(make_shared<FactorAutodiffDistance3D>(ftr_dist,
-                                                                                  kfs.at(0),
-                                                                                  nullptr));
-    kfs.at(0)->addConstrainedBy(fac_dist);
-
-    problem->print(1,1,1,0);
-
-    // ===============================================
-    // SOLVE PROBLEM (1) =============================
-    // ===============================================
-
-    std::cout << "================== SOLVE 1rst TIME ========================" << std::endl;
-
-    std::string report = ceres_manager->solve(SolverManager::ReportVerbosity::FULL);
-    std::cout << report << std::endl;
-
-    problem->print(1,1,1,0);
-
-    // Print orientation states for all KFs
-    for (auto kf : problem->getTrajectory()->getFrameList())
-        std::cout << "KF" << kf->id() << " Euler deg " << wolf::q2e(kf->getO()->getState()).transpose()*180.0/3.14159 << std::endl;
-
-
-    // ===============================================
-    // COVARIANCES ===================================
-    // ===============================================
-    // GET COVARIANCES of all states
-    WOLF_TRACE("======== COVARIANCES OF SOLVED PROBLEM =======")
-    ceres_manager->computeCovariances(SolverManager::CovarianceBlocksToBeComputed::ALL_MARGINALS);
-    for (auto kf : problem->getTrajectory()->getFrameList())
-        if (kf->isKey())
-        {
-            Eigen::MatrixXs cov;
-            kf->getCovariance(cov);
-            WOLF_TRACE("KF", kf->id(), "_std (sigmas) = ", cov.diagonal().transpose().array().sqrt());
-        }
-    std::cout << std::endl;
-
-    // ===============================================
-    // PERTURBATE STATES =============================
-    // ===============================================
-
-    // ADD PERTURBATION
-    std::cout << "================== ADD PERTURBATION ========================" << std::endl;
-
-    for (auto kf : problem->getTrajectory()->getFrameList())
-    {
-        if (kf != kf1)
-        {
-            Eigen::Vector7s perturbation; perturbation << Vector7s::Random() * 0.05;
-            Eigen::Vector7s state_perturbed = kf->getState() + perturbation;
-            state_perturbed.segment(3,4).normalize();
-            kf->setState(state_perturbed);
-            std::cout << "KF" << kf->id() << " Euler deg " << wolf::q2e(kf->getO()->getState()).transpose()*180.0/3.14159 << std::endl;
-        }
-    }
-
-    problem->print(1,1,1,0);
-
-    // ===============================================
-    // SOLVE PROBLEM (2) =============================
-    // ===============================================
-
-    // ===== SOLVE SECOND TIME =====
-    report = ceres_manager->solve(SolverManager::ReportVerbosity::FULL);
-
-    std::cout << report << std::endl;
-
-    std::cout << "================== AFTER SOLVE 2nd TIME ========================" << std::endl;
-    problem->print(1,1,1,0);
-
-    for (auto kf : problem->getTrajectory()->getFrameList())
-        std::cout << "KF" << kf->id() << " Euler deg " << wolf::q2e(kf->getO()->getState()).transpose()*180.0/3.14159 << std::endl;
-
-    cv::waitKey(0); // Wait for a keystroke in the window
-
-    return 0;
-}
diff --git a/src/processor/processor_tracker_feature_trifocal.cpp b/src/processor/processor_tracker_feature_trifocal.cpp
deleted file mode 100644
index 622f733132daf2ffdf2bc00ce9f21e016da050af..0000000000000000000000000000000000000000
--- a/src/processor/processor_tracker_feature_trifocal.cpp
+++ /dev/null
@@ -1,480 +0,0 @@
-
-// wolf
-#include "base/processor/processor_tracker_feature_trifocal.h"
-
-#include "base/sensor/sensor_camera.h"
-#include "base/feature/feature_point_image.h"
-#include "base/factor/factor_autodiff_trifocal.h"
-#include "base/capture/capture_image.h"
-
-// vision_utils
-#include <vision_utils.h>
-#include <detectors.h>
-#include <descriptors.h>
-#include <matchers.h>
-#include <algorithms.h>
-
-#include <memory>
-
-namespace wolf {
-
-//// DEBUG =====================================
-//debug_tTmp = clock();
-//WOLF_TRACE("======== DetectNewFeatures =========");
-//// ===========================================
-//
-//// DEBUG =====================================
-//debug_comp_time_ = (double)(clock() - debug_tTmp) / CLOCKS_PER_SEC;
-//WOLF_TRACE("--> TIME: Detect New Features: detect ",debug_comp_time_);
-//// ===========================================
-
-// Constructor
-ProcessorTrackerFeatureTrifocal::ProcessorTrackerFeatureTrifocal(ProcessorParamsTrackerFeatureTrifocalPtr _params_tracker_feature_trifocal) :
-        ProcessorTrackerFeature("TRACKER FEATURE TRIFOCAL", _params_tracker_feature_trifocal ),
-        params_tracker_feature_trifocal_(_params_tracker_feature_trifocal),
-        capture_image_last_(nullptr),
-        capture_image_incoming_(nullptr),
-        prev_origin_ptr_(nullptr),
-        initialized_(false)
-{
-    assert(!(params_tracker_feature_trifocal_->yaml_file_params_vision_utils.empty()) && "Missing YAML file with vision_utils parameters!");
-    assert(file_exists(params_tracker_feature_trifocal_->yaml_file_params_vision_utils) && "Cannot setup processor: vision_utils' YAML file does not exist.");
-
-    pixel_cov_ = Eigen::Matrix2s::Identity() * params_tracker_feature_trifocal_->pixel_noise_std * params_tracker_feature_trifocal_->pixel_noise_std;
-
-    // Detector
-    std::string det_name = vision_utils::readYamlType(params_tracker_feature_trifocal_->yaml_file_params_vision_utils, "detector");
-    det_ptr_ = vision_utils::setupDetector(det_name, det_name + " detector", params_tracker_feature_trifocal_->yaml_file_params_vision_utils);
-
-    // Descriptor
-    std::string des_name = vision_utils::readYamlType(params_tracker_feature_trifocal_->yaml_file_params_vision_utils, "descriptor");
-    des_ptr_ = vision_utils::setupDescriptor(des_name, des_name + " descriptor", params_tracker_feature_trifocal_->yaml_file_params_vision_utils);
-
-    // Matcher
-    std::string mat_name = vision_utils::readYamlType(params_tracker_feature_trifocal_->yaml_file_params_vision_utils, "matcher");
-    mat_ptr_ = vision_utils::setupMatcher(mat_name, mat_name + " matcher", params_tracker_feature_trifocal_->yaml_file_params_vision_utils);
-
-//    // DEBUG VIEW
-    cv::startWindowThread();
-    cv::namedWindow("DEBUG VIEW", cv::WINDOW_NORMAL);
-//    cv::namedWindow("DEBUG MATCHES", cv::WINDOW_NORMAL);
-}
-
-// Destructor
-ProcessorTrackerFeatureTrifocal::~ProcessorTrackerFeatureTrifocal()
-{
-//    cv::destroyAllWindows();
-}
-
-bool ProcessorTrackerFeatureTrifocal::isInlier(const cv::KeyPoint& _kp_last, const cv::KeyPoint& _kp_incoming)
-{
-    // List of conditions
-    bool inlier = true;
-
-    // A. Check euclidean norm
-    inlier = inlier && (cv::norm(_kp_incoming.pt-_kp_last.pt) < mat_ptr_->getParams()->max_match_euclidean_dist);
-
-    // B. add your new condition test here
-    // inlier = inlier && ...
-
-    return inlier;
-}
-
-
-unsigned int ProcessorTrackerFeatureTrifocal::detectNewFeatures(const int& _max_new_features, FeatureBasePtrList& _features_last_out)
-{
-//    // DEBUG =====================================
-//    clock_t debug_tStart;
-//    double debug_comp_time_;
-//    debug_tStart = clock();
-//    WOLF_TRACE("======== DetectNewFeatures =========");
-//    // ===========================================
-
-    for (unsigned int n_iterations = 0; _max_new_features == -1 || n_iterations < _max_new_features; ++n_iterations)
-    {
-        Eigen::Vector2i cell_last;
-        if (capture_image_last_->grid_features_->pickEmptyTrackingCell(cell_last))
-        {
-            // Get best keypoint in cell
-            vision_utils::FeatureIdxMap cell_feat_map = capture_image_last_->grid_features_->getFeatureIdxMap(cell_last(0), cell_last(1));
-            bool found_feature_in_cell = false;
-
-            for (auto target_last_pair_response_idx : cell_feat_map)
-            {
-                // Get KeyPoint in last
-                unsigned int index_last = target_last_pair_response_idx.second;
-                cv::KeyPoint kp_last = capture_image_last_->keypoints_.at(index_last);
-                assert(target_last_pair_response_idx.first == kp_last.response && "[ProcessorTrackerFeatureTrifocal::detectNewFeatures]: response mismatch.");
-
-                // Check if there is match with incoming, if not we do not want it
-                if (capture_image_last_->map_index_to_next_.count(index_last))
-                {
-                    // matching keypoint in incoming
-                    unsigned int index_incoming = capture_image_last_->map_index_to_next_[index_last];
-                    cv::KeyPoint kp_incoming = capture_image_incoming_->keypoints_.at(index_incoming);
-
-                    // validate match with extra tests
-                    if (isInlier( kp_incoming, kp_last))
-                    {
-                        // Create WOLF feature
-                        FeaturePointImagePtr ftr_point_last = std::make_shared<FeaturePointImage>(
-                                kp_last,
-                                index_last,
-                                capture_image_last_->descriptors_.row(index_last),
-                                pixel_cov_);
-
-                        _features_last_out.push_back(ftr_point_last);
-
-                        // hit cell to acknowledge there's a tracked point in that cell
-                        capture_image_last_->grid_features_->hitTrackingCell(kp_last);
-
-                        found_feature_in_cell = true;
-
-                        break; // Good kp found for this grid.
-                    }
-                }
-            }
-            if (!found_feature_in_cell)
-                capture_image_last_->grid_features_->blockTrackingCell(cell_last);
-        }
-        else
-            break; // There are no empty cells
-    }
-
-//    // DEBUG
-//    WOLF_TRACE("DetectNewFeatures - Number of new features detected: " , _feature_list_out.size() );
-//    debug_comp_time_ = (double)(clock() - debug_tStart) / CLOCKS_PER_SEC;
-//    WOLF_TRACE("--> TIME: detect new features: TOTAL ",debug_comp_time_);
-//    WOLF_TRACE("======== END DETECT NEW FEATURES =========");
-
-    return _features_last_out.size();
-}
-
-unsigned int ProcessorTrackerFeatureTrifocal::trackFeatures(const FeatureBasePtrList& _features_last_in, FeatureBasePtrList& _features_incoming_out, FeatureMatchMap& _feature_matches)
-{
-//    // DEBUG =====================================
-//    clock_t debug_tStart;
-//    double debug_comp_time_;
-//    debug_tStart = clock();
-//    WOLF_TRACE("======== TrackFeatures =========");
-//    // ===========================================
-
-    for (auto feature_base_last_ : _features_last_in)
-    {
-        FeaturePointImagePtr feature_last_ = std::static_pointer_cast<FeaturePointImage>(feature_base_last_);
-
-        if ( capture_image_last_->map_index_to_next_.count(feature_last_->getIndexKeyPoint()) )
-        {
-            int index_incoming = capture_image_last_->map_index_to_next_[feature_last_->getIndexKeyPoint()];
-
-            if (capture_image_incoming_->matches_normalized_scores_.at(index_incoming) > mat_ptr_->getParams()->min_norm_score )
-            {
-                // Check Euclidean distance between keypoints
-                cv::KeyPoint kp_incoming = capture_image_incoming_->keypoints_.at(index_incoming);
-                cv::KeyPoint kp_last = capture_image_last_->keypoints_.at(feature_last_->getIndexKeyPoint());
-
-                if (isInlier(kp_last, kp_incoming))
-                {
-                    FeaturePointImagePtr ftr_point_incoming = std::make_shared<FeaturePointImage>(
-                            kp_incoming,
-                            index_incoming,
-                            capture_image_incoming_->descriptors_.row(index_incoming),
-                            pixel_cov_);
-
-                    _features_incoming_out.push_back(ftr_point_incoming);
-
-                    _feature_matches[ftr_point_incoming] = std::make_shared<FeatureMatch>(FeatureMatch({feature_last_, capture_image_incoming_->matches_normalized_scores_.at(index_incoming)}));
-
-                    // hit cell to acknowledge there's a tracked point in that cell
-                    capture_image_incoming_->grid_features_->hitTrackingCell(kp_incoming);
-                }
-            }
-        }
-    }
-
-//    // DEBUG
-//    WOLF_TRACE("TrAckFeatures - Number of features tracked: " , _feature_list_out.size() );
-//    debug_comp_time_ = (double)(clock() - debug_tStart) / CLOCKS_PER_SEC;
-//    WOLF_TRACE("--> TIME: track: ",debug_comp_time_);
-//    WOLF_TRACE("======== END TRACK FEATURES =========");
-
-    return _features_incoming_out.size();
-}
-
-bool ProcessorTrackerFeatureTrifocal::correctFeatureDrift(const FeatureBasePtr _origin_feature, const FeatureBasePtr _last_feature, FeatureBasePtr _incoming_feature)
-{
-    // We purposely ignore this step
-    return true;
-}
-
-bool ProcessorTrackerFeatureTrifocal::voteForKeyFrame()
-{
-//    // A. crossing voting threshold with ascending number of features
-    bool vote_up = false;
-//    // 1. vote if we did not have enough features before
-//    vote_up = vote_up && (previousNumberOfTracks() < params_tracker_feature_trifocal_->min_features_for_keyframe);
-//    // 2. vote if we have enough features now
-//    vote_up = vote_up && (incoming_ptr_->getFeatureList().size() >= params_tracker_feature_trifocal_->min_features_for_keyframe);
-
-    // B. crossing voting threshold with descending number of features
-    bool vote_down = true;
-    // 1. vote if we had enough features before
-//    vote_down = vote_down && (last_ptr_->getFeatureList().size() >= params_tracker_feature_trifocal_->min_features_for_keyframe);
-    // 2. vote if we have not enough features now
-    vote_down = vote_down && (incoming_ptr_->getFeatureList().size() < params_tracker_feature_trifocal_->min_features_for_keyframe);
-
-//    // C. Time-based policies
-    bool vote_time = false;
-////    vote_time = vote_time || (incoming_ptr_->getTimeStamp()-origin_ptr_->getTimeStamp() > 1.0);
-//
-//    if (vote_up)
-//        WOLF_TRACE("VOTE UP");
-//    if (vote_down)
-//        WOLF_TRACE("VOTE DOWN");
-//    if (vote_time)
-//        WOLF_TRACE("VOTE TIME");
-
-    return vote_up || vote_down || vote_time;
-}
-
-void ProcessorTrackerFeatureTrifocal::advanceDerived()
-{
-    ProcessorTrackerFeature::advanceDerived();
-}
-
-void ProcessorTrackerFeatureTrifocal::resetDerived()
-{
-    // Call parent class' reset method
-    ProcessorTrackerFeature::resetDerived();
-
-    // Conditionally assign the prev_origin pointer
-    if (initialized_)
-        prev_origin_ptr_ = origin_ptr_;
-
-//    // Print resulting list
-//    TrackMatches matches_prevorig_origin = track_matrix_.matches(prev_origin_ptr_, origin_ptr_);
-//    for (auto const & pair_trkid_pair : matches_prevorig_origin)
-//    {
-//        FeatureBasePtr feature_in_prev   = pair_trkid_pair.second.first;
-//        FeatureBasePtr feature_in_origin = pair_trkid_pair.second.second;
-//
-//        WOLF_DEBUG("Matches reset prev <-- orig: track: ", pair_trkid_pair.first,
-//                   " prev: ", feature_in_prev->id(),
-//                   " origin: ", feature_in_origin->id());
-//    }
-}
-
-void ProcessorTrackerFeatureTrifocal::preProcess()
-{
-//    //DEBUG
-//    WOLF_TRACE("-------- Image ", getIncoming()->id(), " -- t = ", getIncoming()->getTimeStamp(), " s ----------");
-
-    if (!initialized_)
-    {
-        if (origin_ptr_ && last_ptr_ && (last_ptr_ != origin_ptr_) && prev_origin_ptr_ == nullptr)
-            prev_origin_ptr_ = origin_ptr_;
-
-        if (prev_origin_ptr_ && origin_ptr_ && last_ptr_ && prev_origin_ptr_ != origin_ptr_)
-            initialized_ = true;
-    }
-
-    // Get capture
-    capture_image_incoming_ = std::static_pointer_cast<CaptureImage>(incoming_ptr_);
-
-    // Detect INC KeyPoints
-    capture_image_incoming_->keypoints_ = det_ptr_->detect(capture_image_incoming_->getImage());
-
-    // Get INC descriptors
-    capture_image_incoming_->descriptors_ = des_ptr_->getDescriptor(capture_image_incoming_->getImage(), capture_image_incoming_->keypoints_);
-
-    // Create and fill incoming grid
-    capture_image_incoming_->grid_features_ = std::make_shared<vision_utils::FeatureIdxGrid>(capture_image_incoming_->getImage().rows, capture_image_incoming_->getImage().cols, params_tracker_feature_trifocal_->n_cells_v, params_tracker_feature_trifocal_->n_cells_h);
-
-    capture_image_incoming_->grid_features_->insert(capture_image_incoming_->keypoints_);
-
-    // If last_ptr_ is not null, then we can do some computation here.
-    if (last_ptr_ != nullptr)
-    {
-        // Get capture
-        capture_image_last_ = std::static_pointer_cast<CaptureImage>(last_ptr_);
-
-        // Match full image (faster)
-        // We exchange the order of the descriptors to better fill the map hereafter (a map key cannot be repeated)
-        capture_image_incoming_->matches_normalized_scores_ = mat_ptr_->robustMatch(capture_image_incoming_->keypoints_,
-                                                                              capture_image_last_->keypoints_,
-                                                                              capture_image_incoming_->descriptors_,
-                                                                              capture_image_last_->descriptors_,
-                                                                              des_ptr_->getSize(),
-                                                                              capture_image_incoming_->matches_from_precedent_);
-
-        // Set capture map of match indices
-        for (auto match : capture_image_incoming_->matches_from_precedent_)
-            capture_image_last_->map_index_to_next_[match.trainIdx] = match.queryIdx; // map[last] = incoming
-
-        // DEBUG
-//        cv::Mat img_last = (std::static_pointer_cast<CaptureImage>(last_ptr_))->getImage();
-//        cv::Mat img_incoming = (std::static_pointer_cast<CaptureImage>(incoming_ptr_))->getImage();
-//
-//        cv::putText(img_last, "LAST",    cv::Point(img_last.cols/2,20), cv::FONT_HERSHEY_PLAIN, 1.0, CV_RGB(255,0,0), 10.0);
-//        cv::putText(img_incoming, "INCOMING",cv::Point(img_last.cols/2,20), cv::FONT_HERSHEY_PLAIN, 1.0, CV_RGB(255,0,0), 10.0);
-//
-//        cv::Mat img_matches;
-//        cv::drawMatches(img_incoming, capture_incoming_->keypoints_, img_last, capture_last_->keypoints_, capture_incoming_->matches_from_precedent_, img_matches);
-//        cv::imshow("DEBUG MATCHES", img_matches);
-//        cv::waitKey(0);
-    }
-}
-
-void ProcessorTrackerFeatureTrifocal::postProcess()
-{
-    std::map<int,std::list<vision_utils::KeyPointEnhanced> > kp_enh_tracks;
-
-    for (auto const & feat_base : last_ptr_->getFeatureList())
-    {
-        FeaturePointImagePtr feat = std::static_pointer_cast<FeaturePointImage>(feat_base);
-        unsigned int feat_id = feat->id();
-        unsigned int track_id = feat->trackId();
-
-        // tracks
-        std::list<vision_utils::KeyPointEnhanced> kp_enh_track;
-        for (auto feat_track_base : track_matrix_.trackAsVector(track_id))
-        {
-            FeaturePointImagePtr feat_img = std::static_pointer_cast<FeaturePointImage>(feat_track_base);
-            vision_utils::KeyPointEnhanced kp_enh(feat_img->getKeypoint(),
-                                                  feat_id,
-                                                  track_id,
-                                                  track_matrix_.trackSize(track_id),
-                                                  feat_img->getMeasurementCovariance());
-            kp_enh_track.push_back(kp_enh);
-        }
-
-        kp_enh_tracks[feat_id] = kp_enh_track;
-    }
-
-    // DEBUG
-    image_debug_ = vision_utils::buildImageProcessed((std::static_pointer_cast<CaptureImage>(last_ptr_))->getImage(), kp_enh_tracks);
-
-    cv::imshow("DEBUG VIEW", image_debug_);
-    cv::waitKey(1);
-}
-
-FactorBasePtr ProcessorTrackerFeatureTrifocal::createFactor(FeatureBasePtr _feature_ptr, FeatureBasePtr _feature_other_ptr)
-{
-    // NOTE: This function cannot be implemented because the API lacks an input for feature_prev_origin.
-    // Therefore, we implement establishFactors() instead and do all the work there.
-    // This function remains empty, and a warning is issued in case someone calls it.
-    std::cout << "\033[1;33m [WARN]:\033[0m ProcessorTrackerFeatureTrifocal::createFactor is empty." << std::endl;
-    FactorBasePtr return_var{}; //TODO: fill this variable
-    return return_var;
-}
-
-void ProcessorTrackerFeatureTrifocal::establishFactors()
-{
-//    WOLF_TRACE("===== ESTABLISH CONSTRAINT =====");
-
-    if (initialized_)
-    {
-        // get tracks between prev, origin and last
-        TrackMatches matches = track_matrix_.matches(prev_origin_ptr_, last_ptr_); // it's guaranteed by construction that the track also includes origin
-
-        for (auto pair_trkid_match : matches) // Note: this can potentially add a lot of factors! TODO: find a smarter way of adding factors.
-        {                                     // Currently limited by creating factors only for sufficiently long tracks.
-            // get track ID
-            SizeStd trk_id = pair_trkid_match.first;
-
-            size_t trk_length = track_matrix_.trackSize(trk_id);
-
-            if (trk_length >= params_tracker_feature_trifocal_->min_track_length_for_factor)
-            {
-                // get the last feature in the track
-                FeatureBasePtr ftr_last = pair_trkid_match.second.second;
-
-                // get the first feature in the whole track
-                FeatureBasePtr ftr_first = track_matrix_.firstFeature(trk_id);
-
-                // get the middle feature of the track using the average of the time stamps
-                FeatureBasePtr ftr_mid = nullptr;
-
-                TimeStamp ts_first      = ftr_first->getCapture()->getTimeStamp();
-                TimeStamp ts_last       = ftr_last->getCapture()->getTimeStamp();
-                Scalar    Dt2           = (ts_last - ts_first) / 2.0;
-                TimeStamp ts_ave        = ts_first + Dt2;
-
-                Scalar dt_dev_min = Dt2;
-                auto track = track_matrix_.track(trk_id);
-                for (auto ftr_it = track.begin() ; ftr_it != track.end() ; ftr_it ++)
-                {
-                    if ( ftr_it->second->getCapture() != nullptr ) // have capture
-                    {
-                        if ( auto kf_mid = ftr_it->second->getCapture()->getFrame() ) // have frame
-                        {
-                            TimeStamp ts_mid    = kf_mid->getTimeStamp();
-
-                            auto dt_dev_curr = fabs(ts_mid - ts_ave);
-                            if (dt_dev_curr <= dt_dev_min) // dt_dev is decreasing
-                            {
-                                dt_dev_min  = dt_dev_curr;
-                                ftr_mid = ftr_it->second;
-                            }
-                            else //if (dt_dev is increasing)
-                                break;
-                        }
-                    }
-                }
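-                // Worked example of the selection above: for a track whose features are stamped
-                // at {1.0, 2.0, 2.4, 3.0}, ts_ave = 1.0 + (3.0 - 1.0)/2 = 2.0, so the loop keeps
-                // the feature at ts = 2.0 (deviation 0) and breaks at ts = 2.4, where the
-                // deviation starts growing again.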
-
-                // Several safety checks
-                assert(ftr_mid != nullptr   && "Middle feature is nullptr!");
-                assert(ftr_mid->getCapture()->getFrame() != nullptr   && "Middle feature's frame is nullptr!");
-                assert(ftr_mid != ftr_first && "First and middle features are the same!");
-                assert(ftr_mid != ftr_last  && "Last and middle features are the same!");
-
-                // make constraint
-                FactorAutodiffTrifocalPtr fac = std::make_shared<FactorAutodiffTrifocal>(ftr_first,
-                                                                                         ftr_mid,
-                                                                                         ftr_last,
-                                                                                         shared_from_this(),
-                                                                                         false,
-                                                                                         FAC_ACTIVE);
-
-                // link to wolf tree
-                fac         ->  setFeature      (ftr_last);
-                ftr_first   ->  addConstrainedBy(fac);
-                ftr_mid     ->  addConstrainedBy(fac);
-                ftr_last    ->  addFactor       (fac);
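-                // (The factor hangs from the last feature of the track and constrains the first
-                //  and middle ones: at most one trifocal factor per sufficiently long track.)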
-            }
-        }
-    }
-
-//    WOLF_TRACE("===== END ESTABLISH CONSTRAINT =====");
-
-}
-
-void ProcessorTrackerFeatureTrifocal::setParams(const ProcessorParamsTrackerFeatureTrifocalPtr _params)
-{
-    params_tracker_feature_trifocal_ = _params;
-}
-
-void ProcessorTrackerFeatureTrifocal::configure(SensorBasePtr _sensor)
-{
-    _sensor->setNoiseStd(Vector2s::Ones() * params_tracker_feature_trifocal_->pixel_noise_std);
-}
-
-ProcessorBasePtr ProcessorTrackerFeatureTrifocal::create(const std::string& _unique_name,
-                                                         const ProcessorParamsBasePtr _params,
-                                                         const SensorBasePtr _sensor_ptr)
-{
-  const auto params = std::static_pointer_cast<ProcessorParamsTrackerFeatureTrifocal>(_params);
-
-  ProcessorBasePtr prc_ptr = std::make_shared<ProcessorTrackerFeatureTrifocal>(params);
-  prc_ptr->setName(_unique_name);
-  prc_ptr->configure(_sensor_ptr);
-  return prc_ptr;
-}
-
-} // namespace wolf
-
-// Register in the ProcessorFactory
-#include "base/processor/processor_factory.h"
-namespace wolf {
-WOLF_REGISTER_PROCESSOR("TRACKER FEATURE TRIFOCAL", ProcessorTrackerFeatureTrifocal)
-} // namespace wolf
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index ac0411962e626a86cda0a101bc61c05b7b00010a..56ff30a51d369af9e2d8a00bfdc3141788b0bc36 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -138,12 +138,6 @@ target_link_libraries(gtest_factor_absolute ${PROJECT_NAME})
 wolf_add_gtest(gtest_factor_autodiff_distance_3D gtest_factor_autodiff_distance_3D.cpp)
 target_link_libraries(gtest_factor_autodiff_distance_3D ${PROJECT_NAME})
 
-IF(vision_utils_FOUND)
-# FactorAutodiffTrifocal test
-wolf_add_gtest(gtest_factor_autodiff_trifocal gtest_factor_autodiff_trifocal.cpp)
-target_link_libraries(gtest_factor_autodiff_trifocal ${PROJECT_NAME})
-ENDIF(vision_utils_FOUND)
-
 # FactorGnssFix2D test
 wolf_add_gtest(gtest_factor_gnss_fix_2D gtest_factor_gnss_fix_2D.cpp)
 target_link_libraries(gtest_factor_gnss_fix_2D ${PROJECT_NAME})
@@ -205,12 +199,6 @@ target_link_libraries(gtest_processor_IMU ${PROJECT_NAME})
 wolf_add_gtest(gtest_processor_IMU_jacobians gtest_processor_IMU_jacobians.cpp)
 target_link_libraries(gtest_processor_IMU_jacobians ${PROJECT_NAME})
 
-IF(vision_utils_FOUND)
-# ProcessorFeatureTrifocal test
-wolf_add_gtest(gtest_processor_tracker_feature_trifocal gtest_processor_tracker_feature_trifocal.cpp)
-target_link_libraries(gtest_processor_tracker_feature_trifocal ${PROJECT_NAME})
-ENDIF(vision_utils_FOUND)
-
 # ProcessorMotion in 2D
 wolf_add_gtest(gtest_odom_2D gtest_odom_2D.cpp)
 target_link_libraries(gtest_odom_2D ${PROJECT_NAME})
diff --git a/test/gtest_factor_autodiff_trifocal.cpp b/test/gtest_factor_autodiff_trifocal.cpp
deleted file mode 100644
index c270182766e6ff13cf6e02ba1e0784fe5e061bed..0000000000000000000000000000000000000000
--- a/test/gtest_factor_autodiff_trifocal.cpp
+++ /dev/null
@@ -1,974 +0,0 @@
-#include "utils_gtest.h"
-
-#include "base/logging.h"
-
-#include "base/ceres_wrapper/ceres_manager.h"
-#include "base/processor/processor_tracker_feature_trifocal.h"
-#include "base/capture/capture_image.h"
-#include "base/factor/factor_autodiff_trifocal.h"
-
-using namespace Eigen;
-using namespace wolf;
-
-class FactorAutodiffTrifocalTest : public testing::Test{
-    public:
-        Vector3s    pos1,   pos2,   pos3,   pos_cam, point;
-        Vector3s    euler1, euler2, euler3, euler_cam;
-        Quaternions quat1,  quat2,  quat3,  quat_cam;
-        Vector4s    vquat1, vquat2, vquat3, vquat_cam; // quaternions as vectors
-        Vector7s    pose1,  pose2,  pose3,  pose_cam;
-
-        ProblemPtr      problem;
-        CeresManagerPtr ceres_manager;
-
-        SensorCameraPtr camera;
-        ProcessorTrackerFeatureTrifocalPtr proc_trifocal;
-
-        SensorBasePtr   S;
-        FrameBasePtr    F1, F2, F3;
-        CaptureImagePtr I1, I2, I3;
-        FeatureBasePtr  f1, f2, f3;
-        FactorAutodiffTrifocalPtr c123;
-
-        Scalar pixel_noise_std;
-
-        virtual void SetUp() override
-        {
-            std::string wolf_root = _WOLF_ROOT_DIR;
-
-            // configuration
-            /*
-             * We have three robot poses, in three frames, with cameras C1, C2, C3
-             * looking towards the origin of coordinates:
-             *
-             *              Z
-             *              |
-             *           ________  C3
-             *          /   |   /
-             *         ---------           /| C2
-             *              |             / |
-             *              |____________/_ | ___ Y
-             *             /             |  /
-             *            /              | /
-             *      --------- C1         |/
-             *      |   /   |
-             *      ---------
-             *        /
-             *       Y
-             *
-             * Each robot pose is at one axis, facing the origin:
-             *   F1: pos = (1,0,0), ori = (0,0,pi)
-             *   F2: pos = (0,1,0), ori = (0,0,-pi/2)
-             *   F3: pos = (0,0,1), ori = (0,pi/2,pi)
-             *
-             * The robot has a camera looking forward
-             *   S: pos = (0,0,0), ori = (-pi/2, 0, -pi/2)
-             *
-             * There is a point at the origin
-             *   P: pos = (0,0,0)
-             *
-             * The camera is canonical
-             *   K = Id.
-             *
-             * Therefore, P projects exactly at the origin on each camera,
-             * creating three features:
-             *   f1: p1 = (0,0)
-             *   f2: p2 = (0,0)
-             *   f3: p3 = (0,0)
-             *
-             * We form a Wolf tree with three frames, three captures,
-             * three features, and one trifocal factor:
-             *
-             *   Frame F1, Capture C1, feature f1
-             *   Frame F2, Capture C2, feature f2
-             *   Frame F3, Capture C3, feature f3, factor c123
-             *
-             * The three frame poses F1, F2, F3 and the camera pose S
-             * in the robot frame are variables subject to optimization
-             *
-             * We perform a number of tests based on this configuration.
-             */
-
-            // all frames look to the origin
-            pos1   << 1, 0, 0;
-            pos2   << 0, 1, 0;
-            pos3   << 0, 0, 1;
-            euler1 << 0, 0     ,  M_PI   ;
-            euler2 << 0, 0     , -M_PI_2 ;
-            euler3 << 0, M_PI_2,  M_PI   ;
-            quat1  =  e2q(euler1);
-            quat2  =  e2q(euler2);
-            quat3  =  e2q(euler3);
-            vquat1 =  quat1.coeffs();
-            vquat2 =  quat2.coeffs();
-            vquat3 =  quat3.coeffs();
-            pose1  << pos1, vquat1;
-            pose2  << pos2, vquat2;
-            pose3  << pos3, vquat3;
-
-            // camera at the robot origin looking forward
-            pos_cam   <<  0, 0, 0;
-            euler_cam << -M_PI_2, 0, -M_PI_2;// euler_cam *= M_TORAD;
-            quat_cam  =  e2q(euler_cam);
-            vquat_cam =  quat_cam.coeffs();
-            pose_cam  << pos_cam, vquat_cam;
-
-            // Build problem
-            problem = Problem::create("PO 3D");
-            ceres::Solver::Options options;
-            ceres_manager = std::make_shared<CeresManager>(problem, options);
-
-            // Install sensor and processor
-            S      = problem->installSensor("CAMERA", "canonical", pose_cam, wolf_root + "/src/examples/camera_params_canonical.yaml");
-            camera = std::static_pointer_cast<SensorCamera>(S);
-
-            ProcessorParamsTrackerFeatureTrifocalPtr params_tracker_feature_trifocal_trifocal = std::make_shared<ProcessorParamsTrackerFeatureTrifocal>();
-            params_tracker_feature_trifocal_trifocal->time_tolerance                = 1.0/2;
-            params_tracker_feature_trifocal_trifocal->max_new_features              = 5;
-            params_tracker_feature_trifocal_trifocal->min_features_for_keyframe     = 5;
-            params_tracker_feature_trifocal_trifocal->yaml_file_params_vision_utils = wolf_root + "/src/examples/processor_tracker_feature_trifocal_vision_utils.yaml";
-
-            ProcessorBasePtr proc = problem->installProcessor("TRACKER FEATURE TRIFOCAL", "trifocal", camera, params_tracker_feature_trifocal_trifocal);
-            proc_trifocal = std::static_pointer_cast<ProcessorTrackerFeatureTrifocal>(proc);
-
-            // Add three viewpoints with frame, capture and feature
-            Vector2s pix(0,0);
-            Matrix2s pix_cov; pix_cov.setIdentity();
-            pixel_noise_std = 1.0; // 1-pixel std dev, consistent with the identity pixel covariance above
-
-            F1 = problem->emplaceFrame(KEY_FRAME, pose1, 1.0);
-            I1 = std::make_shared<CaptureImage>(1.0, camera, cv::Mat(2,2,CV_8UC1));
-            F1-> addCapture(I1);
-            f1 = std::make_shared<FeatureBase>("PIXEL", pix, pix_cov); // pixel at origin
-            I1-> addFeature(f1);
-
-            F2 = problem->emplaceFrame(KEY_FRAME, pose2, 2.0);
-            I2 = std::make_shared<CaptureImage>(2.0, camera, cv::Mat(2,2,CV_8UC1));
-            F2-> addCapture(I2);
-            f2 = std::make_shared<FeatureBase>("PIXEL", pix, pix_cov); // pixel at origin
-            I2-> addFeature(f2);
-
-            F3 = problem->emplaceFrame(KEY_FRAME, pose3, 3.0);
-            I3 = std::make_shared<CaptureImage>(3.0, camera, cv::Mat(2,2,CV_8UC1));
-            F3-> addCapture(I3);
-            f3 = std::make_shared<FeatureBase>("PIXEL", pix, pix_cov); // pixel at origin
-            I3-> addFeature(f3);
-
-            // trifocal factor
-            c123 = std::make_shared<FactorAutodiffTrifocal>(f1, f2, f3, proc_trifocal, false, FAC_ACTIVE);
-            f3   ->addFactor   (c123);
-            f1   ->addConstrainedBy(c123);
-            f2   ->addConstrainedBy(c123);
-        }
-};
-
-TEST_F(FactorAutodiffTrifocalTest, InfoMatrix)
-{
-    /** Ground truth covariance. Rationale:
-     * Due to the orthogonal configuration (see line 40 and onwards), we have:
-     *   Let s = pixel_noise_std.
-     *   Let d = 1 be the distance from the cameras to the 3D point
-     *   Let k be a proportionality constant related to the projection and pixelization process
-     *   Let S = k*d*s
-     *   The pixel on camera 1 retroprojects a conic PDF with width S = k*s*d
-     *   The line on camera 2 retroprojects a plane aperture of S = k*s*d
-     *   The product (i.e. intersection) of the cone and plane-aperture PDFs is a sphere of radius S
-     *   Projection of the sphere to camera 3 is a circle of S/k/d=s pixels
-     *   This is the projected covariance: s^2 pixel^2
-     *   The measurement has a std dev of s pixel --> cov is s^2 pixel^2
-     *   The total cov is s^2 pix^2 + s^2 pix^2 = 2s^2 pix^2
-     *   The info matrix is 0.5 s^-2 pix^-2
-     *   The sqrt info matrix is 1/s/sqrt(2) pix^-1
-     */
-    Matrix3s sqrt_info_gt = Matrix3s::Identity() / pixel_noise_std / sqrt(2.0);
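-    // Worked instance of the rationale above, e.g. for s = pixel_noise_std = 1 pix:
-    //   total cov = 2 * s^2 = 2 pix^2,  info = 0.5 pix^-2,  sqrt info = 1/(s*sqrt(2)) ≈ 0.7071 pix^-1,
-    // so sqrt_info_gt ≈ 0.7071 * Identity.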
-
-    ASSERT_MATRIX_APPROX(c123->getSqrtInformationUpper(), sqrt_info_gt, 1e-8);
-
-}
-
-TEST_F(FactorAutodiffTrifocalTest, expectation)
-{
-    //    Homogeneous transform C2 wrt C1
-    Matrix4s _c1Hc2; _c1Hc2 <<
-               0,  0, -1,  1,
-               0,  1,  0,  0,
-               1,  0,  0,  1,
-               0,  0,  0,  1;
-
-    // rotation and translation
-    Matrix3s _c1Rc2 = _c1Hc2.block(0,0,3,3);
-    Vector3s _c1Tc2 = _c1Hc2.block(0,3,3,1);
-
-    // Essential matrix, ground truth (fwd and bkwd)
-    Matrix3s _c1Ec2 = wolf::skew(_c1Tc2) * _c1Rc2;
-    Matrix3s _c2Ec1 = _c1Ec2.transpose();
-
-    // Expected values
-    vision_utils::TrifocalTensor tensor;
-    Matrix3s c2Ec1;
-    c123->expectation(pos1, quat1, pos2, quat2, pos3, quat3, pos_cam, quat_cam, tensor, c2Ec1);
-
-    // check trilinearities
-
-    // Elements computed using the tensor
-    Matrix3s T0, T1, T2;
-    tensor.getLayers(T0,T1,T2);
-    Vector3s _m1 (0,0,1),
-             _m2 (0,0,1),
-             _m3 (0,0,1); // ground truth
-    Matrix3s m1Tt = _m1(0)*T0.transpose()
-                  + _m1(1)*T1.transpose()
-                  + _m1(2)*T2.transpose();
-
-    // Projective line: l = (nx, ny, dn), where (nx, ny) is the normal vector and dn is the signed distance to the origin times norm(nx, ny)
-    Vector3s _l2(0,1,0),
-             _p2(1,0,0),
-             _p3(0,1,0); // ground truth
-    Vector3s l2;
-    l2 = c2Ec1 * _m1;
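-    // For instance, in this (nx, ny, dn) representation the ground-truth epipolar line
-    // _l2 = (0,1,0) is the horizontal line y = 0 through the principal point, which is why
-    // only its direction (not its scale) is compared below.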
-
-    // check epipolar lines (check only director vectors for equal direction)
-    ASSERT_MATRIX_APPROX(l2/l2(1), _l2/_l2(1), 1e-8);
-
-    // check perpendicular lines (check only director vectors for orthogonal direction)
-    ASSERT_NEAR(l2(0)*_p2(0) + l2(1)*_p2(1), 0, 1e-8);
-
-    // Verify trilinearities
-
-    // Point-line-line
-    Matrix1s pll = _p3.transpose() * m1Tt * _p2;
-    ASSERT_NEAR(pll(0), 0, 1e-5);
-
-    // Point-line-point
-    Vector3s plp = wolf::skew(_m3) * m1Tt * _p2;
-    ASSERT_MATRIX_APPROX(plp, Vector3s::Zero(), 1e-8);
-
-    // Point-point-line
-    Vector3s ppl = _p3.transpose() * m1Tt * wolf::skew(_m2);
-    ASSERT_MATRIX_APPROX(ppl, Vector3s::Zero(), 1e-8);
-
-    // Point-point-point
-    Matrix3s ppp = wolf::skew(_m3) * m1Tt * wolf::skew(_m2);
-    ASSERT_MATRIX_APPROX(ppp, Matrix3s::Zero(), 1e-8);
-
-    // check epipolars
-    ASSERT_MATRIX_APPROX(c2Ec1/c2Ec1(0,1), _c2Ec1/_c2Ec1(0,1), 1e-8);
-}
-
-TEST_F(FactorAutodiffTrifocalTest, residual)
-{
-    vision_utils::TrifocalTensor tensor;
-    Matrix3s c2Ec1;
-    Vector3s residual;
-
-    // Nominal values
-    c123->expectation(pos1, quat1, pos2, quat2, pos3, quat3, pos_cam, quat_cam, tensor, c2Ec1);
-    residual = c123->residual(tensor, c2Ec1);
-
-    ASSERT_MATRIX_APPROX(residual, Vector3s::Zero(), 1e-8);
-}
-
-TEST_F(FactorAutodiffTrifocalTest, error_jacobians)
-{
-    vision_utils::TrifocalTensor tensor;
-    Matrix3s c2Ec1;
-    Vector3s residual, residual_pert;
-    Vector3s pix0, pert, pix_pert;
-    Scalar epsilon = 1e-8;
-
-    // Nominal values
-    c123->expectation(pos1, quat1, pos2, quat2, pos3, quat3, pos_cam, quat_cam, tensor, c2Ec1);
-    residual = c123->residual(tensor, c2Ec1);
-
-    Matrix<Scalar, 3, 3> J_e_m1, J_e_m2, J_e_m3, J_r_m1, J_r_m2, J_r_m3;
-    c123->error_jacobians(tensor, c2Ec1, J_e_m1, J_e_m2, J_e_m3);
-    J_r_m1 = c123->getSqrtInformationUpper() * J_e_m1;
-    J_r_m2 = c123->getSqrtInformationUpper() * J_e_m2;
-    J_r_m3 = c123->getSqrtInformationUpper() * J_e_m3;
-
-    // numerical jacs
-    Matrix<Scalar,3,3> Jn_r_m1, Jn_r_m2, Jn_r_m3;
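-    // Each numerical Jacobian below is built column by column with forward differences:
-    //   Jn.col(i) ≈ (residual(m + epsilon * e_i) - residual(m)) / epsilon,
-    // where e_i is the i-th canonical unit vector.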
-
-    // jacs wrt m1
-    pix0 = c123->getPixelCanonicalPrev();
-    for (int i=0; i<3; i++)
-    {
-        pert.setZero();
-        pert(i)  = epsilon;
-        pix_pert = pix0 + pert;
-        c123->setPixelCanonicalPrev(pix_pert); // m1
-        residual_pert = c123->residual(tensor, c2Ec1);
-
-        Jn_r_m1.col(i) = (residual_pert - residual) / epsilon;
-    }
-    c123->setPixelCanonicalPrev(pix0);
-
-    ASSERT_MATRIX_APPROX(J_r_m1, Jn_r_m1, 1e-6);
-
-    // jacs wrt m2
-    pix0 = c123->getPixelCanonicalOrigin();
-    for (int i=0; i<3; i++)
-    {
-        pert.setZero();
-        pert(i)  = epsilon;
-        pix_pert = pix0 + pert;
-        c123->setPixelCanonicalOrigin(pix_pert); // m2
-        residual_pert = c123->residual(tensor, c2Ec1);
-
-        Jn_r_m2.col(i) = (residual_pert - residual) / epsilon;
-    }
-    c123->setPixelCanonicalOrigin(pix0);
-
-    ASSERT_MATRIX_APPROX(J_r_m2, Jn_r_m2, 1e-6);
-
-    // jacs wrt m3
-    pix0 = c123->getPixelCanonicalLast();
-    for (int i=0; i<3; i++)
-    {
-        pert.setZero();
-        pert(i)  = epsilon;
-        pix_pert = pix0 + pert;
-        c123->setPixelCanonicalLast(pix_pert); // m3
-        residual_pert = c123->residual(tensor, c2Ec1);
-
-        Jn_r_m3.col(i) = (residual_pert - residual) / epsilon;
-    }
-    c123->setPixelCanonicalLast(pix0);
-
-    ASSERT_MATRIX_APPROX(J_r_m3, Jn_r_m3, 1e-6);
-
-}
-
-TEST_F(FactorAutodiffTrifocalTest, operator_parenthesis)
-{
-    Vector3s res;
-
-    // Factor with exact states should give zero residual
-    c123->operator ()(pos1.data(), vquat1.data(),
-                      pos2.data(), vquat2.data(),
-                      pos3.data(), vquat3.data(),
-                      pos_cam.data(), vquat_cam.data(),
-                      res.data());
-
-    ASSERT_MATRIX_APPROX(res, Vector3s::Zero(), 1e-8);
-}
-
-TEST_F(FactorAutodiffTrifocalTest, solve_F1)
-{
-    F1->setState(pose1);
-    F2->setState(pose2);
-    F3->setState(pose3);
-    S ->getP()->setState(pos_cam);
-    S ->getO()->setState(vquat_cam);
-    // Residual with prior
-
-    Vector3s res;
-
-    Eigen::VectorXs F1_p = F1->getP()->getState();
-    Eigen::VectorXs F1_o = F1->getO()->getState();
-    Eigen::VectorXs F2_p = F2->getP()->getState();
-    Eigen::VectorXs F2_o = F2->getO()->getState();
-    Eigen::VectorXs F3_p = F3->getP()->getState();
-    Eigen::VectorXs F3_o = F3->getO()->getState();
-    Eigen::VectorXs S_p  = S ->getP()->getState();
-    Eigen::VectorXs S_o  = S ->getO()->getState();
-
-    c123->operator ()(F1_p.data(), F1_o.data(),
-                      F2_p.data(), F2_o.data(),
-                      F3_p.data(), F3_o.data(),
-                      S_p. data(), S_o. data(),
-                      res.data());
-
-    WOLF_DEBUG("Initial state:              ", F1->getState().transpose());
-    WOLF_DEBUG("residual before perturbing: ", res.transpose());
-    ASSERT_MATRIX_APPROX(res, Vector3s::Zero(), 1e-8);
-
-    // Residual with perturbed state
-
-    Vector7s pose_perturbated = F1->getState() + 0.1 * Vector7s::Random();
-    pose_perturbated.segment(3,4).normalize();
-    F1->setState(pose_perturbated);
-
-    F1_p = F1->getP()->getState();
-    F1_o = F1->getO()->getState();
-
-    c123->operator ()(F1_p.data(), F1_o.data(),
-                      F2_p.data(), F2_o.data(),
-                      F3_p.data(), F3_o.data(),
-                      S_p. data(), S_o. data(),
-                      res.data());
-
-    WOLF_DEBUG("perturbed state:            ", pose_perturbated.transpose());
-    WOLF_DEBUG("residual before solve:      ", res.transpose());
-
-    // Residual with solved state
-
-    S ->fixExtrinsics();
-    F1->unfix();
-    F2->fix();
-    F3->fix();
-
-    std::string report = ceres_manager->solve(SolverManager::ReportVerbosity::BRIEF);
-
-    F1_p = F1->getP()->getState();
-    F1_o = F1->getO()->getState();
-    F2_p = F2->getP()->getState();
-    F2_o = F2->getO()->getState();
-    F3_p = F3->getP()->getState();
-    F3_o = F3->getO()->getState();
-    S_p  = S ->getP()->getState();
-    S_o  = S ->getO()->getState();
-
-    c123->operator ()(F1_p.data(), F1_o.data(),
-                      F2_p.data(), F2_o.data(),
-                      F3_p.data(), F3_o.data(),
-                      S_p. data(), S_o. data(),
-                      res.data());
-
-    WOLF_DEBUG("solved state:               ", F1->getState().transpose());
-    WOLF_DEBUG("residual after solve:       ", res.transpose());
-
-    WOLF_DEBUG(report, " AND UNION");
-
-    ASSERT_MATRIX_APPROX(res, Vector3s::Zero(), 1e-8);
-
-}
-
-TEST_F(FactorAutodiffTrifocalTest, solve_F2)
-{
-    F1->setState(pose1);
-    F2->setState(pose2);
-    F3->setState(pose3);
-    S ->getP()->setState(pos_cam);
-    S ->getO()->setState(vquat_cam);
-
-    // Residual with prior
-
-    Vector3s res;
-
-    Eigen::VectorXs F1_p = F1->getP()->getState();
-    Eigen::VectorXs F1_o = F1->getO()->getState();
-    Eigen::VectorXs F2_p = F2->getP()->getState();
-    Eigen::VectorXs F2_o = F2->getO()->getState();
-    Eigen::VectorXs F3_p = F3->getP()->getState();
-    Eigen::VectorXs F3_o = F3->getO()->getState();
-    Eigen::VectorXs S_p  = S ->getP()->getState();
-    Eigen::VectorXs S_o  = S ->getO()->getState();
-
-    c123->operator ()(F1_p.data(), F1_o.data(),
-                      F2_p.data(), F2_o.data(),
-                      F3_p.data(), F3_o.data(),
-                      S_p. data(), S_o. data(),
-                      res.data());
-
-    WOLF_DEBUG("Initial state:              ", F2->getState().transpose());
-    WOLF_DEBUG("residual before perturbing: ", res.transpose());
-    ASSERT_MATRIX_APPROX(res, Vector3s::Zero(), 1e-8);
-
-    // Residual with perturbed state
-
-    Vector7s pose_perturbated = F2->getState() + 0.1 * Vector7s::Random();
-    pose_perturbated.segment(3,4).normalize();
-    F2->setState(pose_perturbated);
-
-    F2_p = F2->getP()->getState();
-    F2_o = F2->getO()->getState();
-
-    c123->operator ()(F1_p.data(), F1_o.data(),
-                      F2_p.data(), F2_o.data(),
-                      F3_p.data(), F3_o.data(),
-                      S_p. data(), S_o. data(),
-                      res.data());
-
-    WOLF_DEBUG("perturbed state:            ", pose_perturbated.transpose());
-    WOLF_DEBUG("residual before solve:      ", res.transpose());
-
-    // Residual with solved state
-
-    S ->fixExtrinsics();
-    F1->fix();
-    F2->unfix();
-    F3->fix();
-
-    std::string report = ceres_manager->solve(SolverManager::ReportVerbosity::BRIEF);
-
-    F1_p = F1->getP()->getState();
-    F1_o = F1->getO()->getState();
-    F2_p = F2->getP()->getState();
-    F2_o = F2->getO()->getState();
-    F3_p = F3->getP()->getState();
-    F3_o = F3->getO()->getState();
-    S_p  = S ->getP()->getState();
-    S_o  = S ->getO()->getState();
-
-    c123->operator ()(F1_p.data(), F1_o.data(),
-                      F2_p.data(), F2_o.data(),
-                      F3_p.data(), F3_o.data(),
-                      S_p. data(), S_o. data(),
-                      res.data());
-
-    WOLF_DEBUG("solved state:               ", F2->getState().transpose());
-    WOLF_DEBUG("residual after solve:       ", res.transpose());
-
-    WOLF_DEBUG(report, " AND UNION");
-
-    ASSERT_MATRIX_APPROX(res, Vector3s::Zero(), 1e-8);
-
-}
-
-TEST_F(FactorAutodiffTrifocalTest, solve_F3)
-{
-    F1->setState(pose1);
-    F2->setState(pose2);
-    F3->setState(pose3);
-    S ->getP()->setState(pos_cam);
-    S ->getO()->setState(vquat_cam);
-
-    // Residual with prior
-
-    Vector3s res;
-
-    Eigen::VectorXs F1_p = F1->getP()->getState();
-    Eigen::VectorXs F1_o = F1->getO()->getState();
-    Eigen::VectorXs F2_p = F2->getP()->getState();
-    Eigen::VectorXs F2_o = F2->getO()->getState();
-    Eigen::VectorXs F3_p = F3->getP()->getState();
-    Eigen::VectorXs F3_o = F3->getO()->getState();
-    Eigen::VectorXs S_p  = S ->getP()->getState();
-    Eigen::VectorXs S_o  = S ->getO()->getState();
-
-    c123->operator ()(F1_p.data(), F1_o.data(),
-                      F2_p.data(), F2_o.data(),
-                      F3_p.data(), F3_o.data(),
-                      S_p. data(), S_o. data(),
-                      res.data());
-
-    WOLF_DEBUG("Initial state:              ", F3->getState().transpose());
-    WOLF_DEBUG("residual before perturbing: ", res.transpose());
-    ASSERT_MATRIX_APPROX(res, Vector3s::Zero(), 1e-8);
-
-    // Residual with perturbed state
-
-    Vector7s pose_perturbated = F3->getState() + 0.1 * Vector7s::Random();
-    pose_perturbated.segment(3,4).normalize();
-    F3->setState(pose_perturbated);
-
-    F3_p = F3->getP()->getState();
-    F3_o = F3->getO()->getState();
-
-    c123->operator ()(F1_p.data(), F1_o.data(),
-                      F2_p.data(), F2_o.data(),
-                      F3_p.data(), F3_o.data(),
-                      S_p. data(), S_o. data(),
-                      res.data());
-
-    WOLF_DEBUG("perturbed state:            ", pose_perturbated.transpose());
-    WOLF_DEBUG("residual before solve:      ", res.transpose());
-    ASSERT_NEAR(res(2), 0, 1e-8); // Epipolar c2-c1 should be respected when perturbing F3
-
-    // Residual with solved state
-
-    S ->fixExtrinsics();
-    F1->fix();
-    F2->fix();
-    F3->unfix();
-
-    std::string report = ceres_manager->solve(SolverManager::ReportVerbosity::BRIEF);
-
-    F1_p = F1->getP()->getState();
-    F1_o = F1->getO()->getState();
-    F2_p = F2->getP()->getState();
-    F2_o = F2->getO()->getState();
-    F3_p = F3->getP()->getState();
-    F3_o = F3->getO()->getState();
-    S_p  = S ->getP()->getState();
-    S_o  = S ->getO()->getState();
-
-    c123->operator ()(F1_p.data(), F1_o.data(),
-                      F2_p.data(), F2_o.data(),
-                      F3_p.data(), F3_o.data(),
-                      S_p. data(), S_o. data(),
-                      res.data());
-
-    WOLF_DEBUG("solved state:               ", F3->getState().transpose());
-    WOLF_DEBUG("residual after solve:       ", res.transpose());
-
-    WOLF_DEBUG(report, " AND UNION");
-
-    ASSERT_MATRIX_APPROX(res, Vector3s::Zero(), 1e-8);
-
-}
-
-TEST_F(FactorAutodiffTrifocalTest, solve_S)
-{
-    F1->setState(pose1);
-    F2->setState(pose2);
-    F3->setState(pose3);
-    S ->getP()->setState(pos_cam);
-    S ->getO()->setState(vquat_cam);
-
-    // Residual with prior
-
-    Vector3s res;
-
-    Eigen::VectorXs F1_p = F1->getP()->getState();
-    Eigen::VectorXs F1_o = F1->getO()->getState();
-    Eigen::VectorXs F2_p = F2->getP()->getState();
-    Eigen::VectorXs F2_o = F2->getO()->getState();
-    Eigen::VectorXs F3_p = F3->getP()->getState();
-    Eigen::VectorXs F3_o = F3->getO()->getState();
-    Eigen::VectorXs S_p  = S ->getP()->getState();
-    Eigen::VectorXs S_o  = S ->getO()->getState();
-
-    c123->operator ()(F1_p.data(), F1_o.data(),
-                      F2_p.data(), F2_o.data(),
-                      F3_p.data(), F3_o.data(),
-                      S_p. data(), S_o. data(),
-                      res.data());
-
-    WOLF_DEBUG("Initial state:              ", S->getP()->getState().transpose(), " ", S->getO()->getState().transpose());
-    WOLF_DEBUG("residual before perturbing: ", res.transpose());
-    ASSERT_MATRIX_APPROX(res, Vector3s::Zero(), 1e-8);
-
-    // Residual with perturbed state
-
-    Vector3s pos_perturbated = pos_cam   + 0.1 * Vector3s::Random();
-    Vector4s ori_perturbated = vquat_cam + 0.1 * Vector4s::Random();
-    ori_perturbated.normalize();
-    Vector7s pose_perturbated; pose_perturbated << pos_perturbated, ori_perturbated;
-    S->getP()->setState(pos_perturbated);
-    S->getO()->setState(ori_perturbated);
-
-    S_p = S->getP()->getState();
-    S_o = S->getO()->getState();
-
-    c123->operator ()(F1_p.data(), F1_o.data(),
-                      F2_p.data(), F2_o.data(),
-                      F3_p.data(), F3_o.data(),
-                      S_p. data(), S_o. data(),
-                      res.data());
-
-    WOLF_DEBUG("perturbed state:            ", pose_perturbated.transpose());
-    WOLF_DEBUG("residual before solve:      ", res.transpose());
-
-    // Residual with solved state
-
-    S ->unfixExtrinsics();
-    F1->fix();
-    F2->fix();
-    F3->fix();
-
-    std::string report = ceres_manager->solve(SolverManager::ReportVerbosity::BRIEF);
-
-    F1_p = F1->getP()->getState();
-    F1_o = F1->getO()->getState();
-    F2_p = F2->getP()->getState();
-    F2_o = F2->getO()->getState();
-    F3_p = F3->getP()->getState();
-    F3_o = F3->getO()->getState();
-    S_p  = S ->getP()->getState();
-    S_o  = S ->getO()->getState();
-
-    c123->operator ()(F1_p.data(), F1_o.data(),
-                      F2_p.data(), F2_o.data(),
-                      F3_p.data(), F3_o.data(),
-                      S_p. data(), S_o. data(),
-                      res.data());
-
-    WOLF_DEBUG("solved state:               ", S->getP()->getState().transpose(), " ", S->getO()->getState().transpose());
-    WOLF_DEBUG("residual after solve:       ", res.transpose());
-
-    WOLF_DEBUG(report, " AND UNION");
-
-    ASSERT_MATRIX_APPROX(res, Vector3s::Zero(), 1e-8);
-
-}
-
-class FactorAutodiffTrifocalMultiPointTest : public FactorAutodiffTrifocalTest
-{
-        /*
-         * In this test class we add 8 more points and perform optimization on the camera frames.
-         *
-         * C1 is not optimized as it acts as the reference
-         * S  is not optimized either as observability is compromised
-         * C2.pos is fixed to set the unobservable scale
-         * C2.ori and all C3 are optimized
-         *
-         */
-
-    public:
-        std::vector<FeatureBasePtr> fv1, fv2, fv3;
-        std::vector<FactorAutodiffTrifocalPtr> cv123;
-
-        virtual void SetUp() override
-        {
-            FactorAutodiffTrifocalTest::SetUp();
-
-            Matrix<Scalar, 2, 9> c1p_can;
-            c1p_can <<
-                    0,   -1/3.00,   -1/3.00,    1/3.00,    1/3.00,   -1.0000,   -1.0000, 1.0000,    1.0000,
-                    0,    1/3.00,   -1/3.00,    1/3.00,   -1/3.00,    1.0000,   -1.0000, 1.0000,   -1.0000;
-
-            Matrix<Scalar, 2, 9> c2p_can;
-            c2p_can <<
-                    0,    1/3.00,    1/3.00,    1.0000,    1.0000,   -1/3.00,   -1/3.00,   -1.0000,   -1.0000,
-                    0,    1/3.00,   -1/3.00,    1.0000,   -1.0000,    1/3.00,   -1/3.00,    1.0000,   -1.0000;
-
-            Matrix<Scalar, 2, 9> c3p_can;
-            c3p_can <<
-                    0,   -1/3.00,   -1.0000,    1/3.00,    1.0000,   -1/3.00,   -1.0000,   1/3.00,    1.0000,
-                    0,   -1/3.00,   -1.0000,   -1/3.00,   -1.0000,    1/3.00,    1.0000,   1/3.00,    1.0000;
-
-            Matrix2s pix_cov; pix_cov.setIdentity(); //pix_cov *= 1e-4;
-
-            // for i==0 we already have them
-            fv1.push_back(f1);
-            fv2.push_back(f2);
-            fv3.push_back(f3);
-            cv123.push_back(c123);
-            for (size_t i=1; i<c1p_can.cols() ; i++)
-            {
-                fv1.push_back(std::make_shared<FeatureBase>("PIXEL", c1p_can.col(i), pix_cov));
-                I1->addFeature(fv1.at(i));
-
-                fv2.push_back(std::make_shared<FeatureBase>("PIXEL", c2p_can.col(i), pix_cov));
-                I2->addFeature(fv2.at(i));
-
-                fv3.push_back(std::make_shared<FeatureBase>("PIXEL", c3p_can.col(i), pix_cov));
-                I3->addFeature(fv3.at(i));
-
-                cv123.push_back(std::make_shared<FactorAutodiffTrifocal>(fv1.at(i), fv2.at(i), fv3.at(i), proc_trifocal, false, FAC_ACTIVE));
-                fv3.at(i)->addFactor(cv123.at(i));
-                fv1.at(i)->addConstrainedBy(cv123.at(i));
-                fv2.at(i)->addConstrainedBy(cv123.at(i));
-            }
-
-        }
-
-};
-
-TEST_F(FactorAutodiffTrifocalMultiPointTest, solve_multi_point)
-{
-    /*
-     * In this test we add 8 more points and perform optimization on the camera frames.
-     *
-     * C1 is not optimized as it acts as the reference
-     * S  is not optimized either as observability is compromised
-     * C2.pos is fixed to set the unobservable scale
-     * C2.ori and all C3 are optimized
-     *
-     */
-
-    S ->getP()->fix(); // do not calibrate sensor pos
-    S ->getO()->fix(); // do not calibrate sensor ori
-    F1->getP()->fix(); // Cam 1 acts as reference
-    F1->getO()->fix(); // Cam 1 acts as reference
-    F2->getP()->fix(); // This fixes the scale
-    F2->getO()->unfix(); // Estimate Cam 2 ori
-    F3->getP()->unfix(); // Estimate Cam 3 pos
-    F3->getO()->unfix(); // Estimate Cam 3 ori
-
-    // Perturb states, keep scale
-    F1->getP()->setState( pos1   );
-    F1->getO()->setState( vquat1 );
-    F2->getP()->setState( pos2   ); // this fixes the scale
-    F2->getO()->setState((vquat2 + 0.2*Vector4s::Random()).normalized());
-    F3->getP()->setState( pos3   + 0.2*Vector3s::Random());
-    F3->getO()->setState((vquat3 + 0.2*Vector4s::Random()).normalized());
-
-    std::string report = ceres_manager->solve(SolverManager::ReportVerbosity::BRIEF);
-
-    // Print results
-    WOLF_DEBUG("report: ", report);
-    problem->print(1,0,1,0);
-
-    // Evaluate final states
-    ASSERT_MATRIX_APPROX(F2->getP()->getState(), pos2  , 1e-10);
-    ASSERT_MATRIX_APPROX(F2->getO()->getState(), vquat2, 1e-10);
-    ASSERT_MATRIX_APPROX(F3->getP()->getState(), pos3  , 1e-10);
-    ASSERT_MATRIX_APPROX(F3->getO()->getState(), vquat3, 1e-10);
-
-    Eigen::VectorXs F1_p = F1->getP()->getState();
-    Eigen::VectorXs F1_o = F1->getO()->getState();
-    Eigen::VectorXs F2_p = F2->getP()->getState();
-    Eigen::VectorXs F2_o = F2->getO()->getState();
-    Eigen::VectorXs F3_p = F3->getP()->getState();
-    Eigen::VectorXs F3_o = F3->getO()->getState();
-    Eigen::VectorXs S_p  = S ->getP()->getState();
-    Eigen::VectorXs S_o  = S ->getO()->getState();
-
-    // evaluate residuals
-    Vector3s res;
-    for (size_t i=0; i<cv123.size(); i++)
-    {
-        cv123.at(i)->operator ()(F1_p.data(), F1_o.data(),
-                                 F2_p.data(), F2_o.data(),
-                                 F3_p.data(), F3_o.data(),
-                                 S_p. data(), S_o. data(),
-                                 res.data());
-
-        ASSERT_MATRIX_APPROX(res, Vector3s::Zero(), 1e-10);
-    }
-
-}
-
-TEST_F(FactorAutodiffTrifocalMultiPointTest, solve_multi_point_scale)
-{
-    /*
-     * In this test we add 8 more points and perform optimization on the camera frames.
-     *
-     * C1 is not optimized as it acts as the reference
-     * S  is not optimized either as observability is compromised
-     * C2.pos is fixed to set the unobservable scale
-     * C2.ori and all C3 are optimized
-     *
-     */
-
-    S ->getP()->fix(); // do not calibrate sensor pos
-    S ->getO()->fix(); // do not calibrate sensor ori
-    F1->getP()->fix(); // Cam 1 acts as reference
-    F1->getO()->fix(); // Cam 1 acts as reference
-    F2->getP()->fix(); // This fixes the scale
-    F2->getO()->unfix(); // Estimate Cam 2 ori
-    F3->getP()->unfix(); // Estimate Cam 3 pos
-    F3->getO()->unfix(); // Estimate Cam 3 ori
-
-    // Perturb states, change scale
-    F1->getP()->setState( 2 * pos1 );
-    F1->getO()->setState(   vquat1 );
-    F2->getP()->setState( 2 * pos2 );
-    F2->getO()->setState((  vquat2 + 0.2*Vector4s::Random()).normalized());
-    F3->getP()->setState( 2 * pos3 + 0.2*Vector3s::Random());
-    F3->getO()->setState((  vquat3 + 0.2*Vector4s::Random()).normalized());
-
-    std::string report = ceres_manager->solve(SolverManager::ReportVerbosity::BRIEF);
-
-    // Print results
-    WOLF_DEBUG("report: ", report);
-    problem->print(1,0,1,0);
-
-    // Evaluate final states
-    ASSERT_MATRIX_APPROX(F2->getP()->getState(), 2 * pos2, 1e-8);
-    ASSERT_MATRIX_APPROX(F2->getO()->getState(),   vquat2, 1e-8);
-    ASSERT_MATRIX_APPROX(F3->getP()->getState(), 2 * pos3, 1e-8);
-    ASSERT_MATRIX_APPROX(F3->getO()->getState(),   vquat3, 1e-8);
-
-    Eigen::VectorXs F1_p = F1->getP()->getState();
-    Eigen::VectorXs F1_o = F1->getO()->getState();
-    Eigen::VectorXs F2_p = F2->getP()->getState();
-    Eigen::VectorXs F2_o = F2->getO()->getState();
-    Eigen::VectorXs F3_p = F3->getP()->getState();
-    Eigen::VectorXs F3_o = F3->getO()->getState();
-    Eigen::VectorXs S_p  = S ->getP()->getState();
-    Eigen::VectorXs S_o  = S ->getO()->getState();
-
-    // evaluate residuals
-    Vector3s res;
-    for (size_t i=0; i<cv123.size(); i++)
-    {
-        cv123.at(i)->operator ()(F1_p.data(), F1_o.data(),
-                                 F2_p.data(), F2_o.data(),
-                                 F3_p.data(), F3_o.data(),
-                                 S_p. data(), S_o. data(),
-                                 res.data());
-
-        ASSERT_MATRIX_APPROX(res, Vector3s::Zero(), 1e-8);
-    }
-}
-
-#include "base/factor/factor_autodiff_distance_3D.h"
-
-TEST_F(FactorAutodiffTrifocalMultiPointTest, solve_multi_point_distance)
-{
-    /*
-     * In this test we add 8 more points and perform optimization on the camera frames.
-     *
-     * C1 is not optimized as it acts as the reference
-     * S  is not optimized either as observability is compromised
-     * C2 and C3 are optimized
-     * The scale is observed through a distance factor
-     *
-     */
-
-    S ->getP()->fix(); // do not calibrate sensor pos
-    S ->getO()->fix(); // do not calibrate sensor ori
-    F1->getP()->fix(); // Cam 1 acts as reference
-    F1->getO()->fix(); // Cam 1 acts as reference
-    F2->getP()->unfix(); // Estimate Cam 2 pos
-    F2->getO()->unfix(); // Estimate Cam 2 ori
-    F3->getP()->unfix(); // Estimate Cam 3 pos
-    F3->getO()->unfix(); // Estimate Cam 3 ori
-
-    // Perturb states, change scale
-    F1->getP()->setState( pos1   );
-    F1->getO()->setState( vquat1 );
-    F2->getP()->setState( pos2   + 0.2*Vector3s::Random() );
-    F2->getO()->setState((vquat2 + 0.2*Vector4s::Random()).normalized());
-    F3->getP()->setState( pos3   + 0.2*Vector3s::Random());
-    F3->getO()->setState((vquat3 + 0.2*Vector4s::Random()).normalized());
-
-    // Add a distance factor to fix the scale
-    Scalar distance     = sqrt(2.0);
-    Scalar distance_std = 0.1;
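-    // The ground-truth separation between F1 at (1,0,0) and F3 at (0,0,1) is
-    // || (0,0,1) - (1,0,0) || = sqrt(1 + 1) = sqrt(2), so this factor pins the F1-F3
-    // distance to its true value and removes the scale ambiguity left by the trifocal factors.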
-
-    CaptureBasePtr Cd = std::make_shared<CaptureBase>("DISTANCE", F3->getTimeStamp());
-    F3->addCapture(Cd);
-    FeatureBasePtr fd = std::make_shared<FeatureBase>("DISTANCE", Vector1s(distance), Matrix1s(distance_std * distance_std));
-    Cd->addFeature(fd);
-    FactorAutodiffDistance3DPtr cd = std::make_shared<FactorAutodiffDistance3D>(fd, F1, nullptr, false, FAC_ACTIVE);
-    fd->addFactor(cd);
-    F1->addConstrainedBy(cd);
-
-    cd->setStatus(FAC_INACTIVE);
-    std::string report = ceres_manager->solve(SolverManager::ReportVerbosity::BRIEF);
-    WOLF_DEBUG("DISTANCE CONSTRAINT INACTIVE: \n", report);
-
-    problem->print(1,0,1,0);
-
-    cd->setStatus(FAC_ACTIVE);
-    report = ceres_manager->solve(SolverManager::ReportVerbosity::BRIEF);
-
-    // Print results
-    WOLF_DEBUG("DISTANCE CONSTRAINT ACTIVE: \n", report);
-    problem->print(1,0,1,0);
-
-    // Evaluate final states
-    ASSERT_MATRIX_APPROX(F2->getP()->getState(), pos2  , 1e-8);
-    ASSERT_MATRIX_APPROX(F2->getO()->getState(), vquat2, 1e-8);
-    ASSERT_MATRIX_APPROX(F3->getP()->getState(), pos3  , 1e-8);
-    ASSERT_MATRIX_APPROX(F3->getO()->getState(), vquat3, 1e-8);
-
-    Eigen::VectorXs F1_p = F1->getP()->getState();
-    Eigen::VectorXs F1_o = F1->getO()->getState();
-    Eigen::VectorXs F2_p = F2->getP()->getState();
-    Eigen::VectorXs F2_o = F2->getO()->getState();
-    Eigen::VectorXs F3_p = F3->getP()->getState();
-    Eigen::VectorXs F3_o = F3->getO()->getState();
-    Eigen::VectorXs S_p  = S ->getP()->getState();
-    Eigen::VectorXs S_o  = S ->getO()->getState();
-
-    // evaluate residuals
-    Vector3s res;
-    for (size_t i=0; i<cv123.size(); i++)
-    {
-        cv123.at(i)->operator ()(F1_p.data(), F1_o.data(),
-                                 F2_p.data(), F2_o.data(),
-                                 F3_p.data(), F3_o.data(),
-                                 S_p. data(), S_o. data(),
-                                 res.data());
-
-        ASSERT_MATRIX_APPROX(res, Vector3s::Zero(), 1e-8);
-    }
-}
-
-int main(int argc, char **argv)
-{
-    testing::InitGoogleTest(&argc, argv);
-    //    ::testing::GTEST_FLAG(filter) = "FactorAutodiffTrifocalTest.solve_F1";
-    //    ::testing::GTEST_FLAG(filter) = "FactorAutodiffTrifocalTest.solve_F2";
-    //    ::testing::GTEST_FLAG(filter) = "FactorAutodiffTrifocalTest.solve_F3";
-    //    ::testing::GTEST_FLAG(filter) = "FactorAutodiffTrifocalTest.solve_S";
-    //    ::testing::GTEST_FLAG(filter) = "FactorAutodiffTrifocalTest.solve_multi_point";
-    //    ::testing::GTEST_FLAG(filter) = "FactorAutodiffTrifocalMultiPointTest.solve_multi_point_distance";
-    return RUN_ALL_TESTS();
-}
-
diff --git a/test/gtest_processor_tracker_feature_trifocal.cpp b/test/gtest_processor_tracker_feature_trifocal.cpp
deleted file mode 100644
index 814175a2a64c308a21027732e3b78c75d5409bc6..0000000000000000000000000000000000000000
--- a/test/gtest_processor_tracker_feature_trifocal.cpp
+++ /dev/null
@@ -1,160 +0,0 @@
-#include "utils_gtest.h"
-
-#include "base/wolf.h"
-#include "base/logging.h"
-
-#include "vision_utils.h"
-
-#include "base/processor/processor_tracker_feature_trifocal.h"
-#include "base/processor/processor_odom_3D.h"
-#include "base/capture/capture_image.h"
-#include "base/sensor/sensor_camera.h"
-
-using namespace Eigen;
-using namespace wolf;
-
-// Use the following in case you want to initialize tests with predefined variables or methods.
-//class ProcessorTrackerFeatureTrifocal_class : public testing::Test{
-//    public:
-//        virtual void SetUp()
-//        {
-//        }
-//};
-
-//TEST(ProcessorTrackerFeatureTrifocal, Constructor)
-//{
-//  std::cout << "\033[1;33m [WARN]:\033[0m gtest for ProcessorTrackerFeatureTrifocal Constructor is empty." << std::endl;
-//}
-
-//TEST(ProcessorTrackerFeatureTrifocal, Destructor)
-//{
-//  std::cout << "\033[1;33m [WARN]:\033[0m gtest for ProcessorTrackerFeatureTrifocal Destructor is empty." << std::endl;
-//}
-
-////[Class methods]
-//TEST(ProcessorTrackerFeatureTrifocal, trackFeatures)
-//{
-//  std::cout << "033[1;33m [WARN]:033[0m gtest for ProcessorTrackerFeatureTrifocal trackFeatures is empty." << std::endl;
-//}
-//
-//TEST(ProcessorTrackerFeatureTrifocal, correctFeatureDrift)
-//{
-//  std::cout << "033[1;33m [WARN]:033[0m gtest for ProcessorTrackerFeatureTrifocal correctFeatureDrift is empty." << std::endl;
-//}
-//
-//TEST(ProcessorTrackerFeatureTrifocal, voteForKeyFrame)
-//{
-//  std::cout << "033[1;33m [WARN]:033[0m gtest for ProcessorTrackerFeatureTrifocal voteForKeyFrame is empty." << std::endl;
-//}
-//
-//TEST(ProcessorTrackerFeatureTrifocal, detectNewFeatures)
-//{
-//  std::cout << "033[1;33m [WARN]:033[0m gtest for ProcessorTrackerFeatureTrifocal detectNewFeatures is empty." << std::endl;
-//}
-//
-//TEST(ProcessorTrackerFeatureTrifocal, createFactor)
-//{
-//  std::cout << "033[1;33m [WARN]:033[0m gtest for ProcessorTrackerFeatureTrifocal createFactor is empty." << std::endl;
-//}
-
-TEST(ProcessorTrackerFeatureTrifocal, KeyFrameCallback)
-{
-
-    using namespace wolf;
-    using std::shared_ptr;
-    using std::make_shared;
-    using std::static_pointer_cast;
-    using Eigen::Vector2s;
-
-    std::string wolf_root = _WOLF_ROOT_DIR;
-
-    Scalar dt = 0.01;
-
-    // Wolf problem
-    ProblemPtr problem = Problem::create("PO 3D");
-
-    // Install tracker (sensor and processor)
-    IntrinsicsCameraPtr intr = make_shared<IntrinsicsCamera>(); // TODO init params or read from YAML
-    intr->width  = 640;
-    intr->height = 480;
-    SensorCameraPtr sens_trk = make_shared<SensorCamera>((Eigen::Vector7s()<<0,0,0, 0,0,0,1).finished(),
-                                                         intr);
-
-    ProcessorParamsTrackerFeatureTrifocalPtr params_tracker_feature_trifocal = std::make_shared<ProcessorParamsTrackerFeatureTrifocal>();
-
-    //    params_tracker_feature_trifocal->name                           = "trifocal";
-    params_tracker_feature_trifocal->pixel_noise_std                = 1.0;
-    params_tracker_feature_trifocal->voting_active                  = true;
-    params_tracker_feature_trifocal->min_features_for_keyframe      = 5;
-    params_tracker_feature_trifocal->time_tolerance                 = dt/2;
-    params_tracker_feature_trifocal->max_new_features               = 5;
-    params_tracker_feature_trifocal->min_response_new_feature       = 25;
-    params_tracker_feature_trifocal->n_cells_h                      = 10;
-    params_tracker_feature_trifocal->n_cells_v                      = 10;
-    params_tracker_feature_trifocal->yaml_file_params_vision_utils  = wolf_root + "/src/examples/processor_tracker_feature_trifocal_vision_utils.yaml";
-
-    ProcessorTrackerFeatureTrifocalPtr proc_trk = make_shared<ProcessorTrackerFeatureTrifocal>(params_tracker_feature_trifocal);
-    proc_trk->configure(sens_trk);
-
-    problem->addSensor(sens_trk);
-    sens_trk->addProcessor(proc_trk);
-
-    // Install odometer (sensor and processor)
-    IntrinsicsOdom3DPtr params = std::make_shared<IntrinsicsOdom3D>();
-    params->min_disp_var = 0.000001;
-    params->min_rot_var  = 0.000001;
-    SensorBasePtr sens_odo = problem->installSensor("ODOM 3D", "odometer", (Vector7s() << 0,0,0,  0,0,0,1).finished(), params);
-    ProcessorParamsOdom3DPtr proc_odo_params = make_shared<ProcessorParamsOdom3D>();
-    proc_odo_params->voting_active   = true;
-    proc_odo_params->time_tolerance  = dt/2;
-    proc_odo_params->max_buff_length = 3;
-    ProcessorBasePtr proc_odo = problem->installProcessor("ODOM 3D", "odometer", sens_odo, proc_odo_params);
-
-    std::cout << "sensor & processor created and added to wolf problem" << std::endl;
-
-    // Sequence to test KeyFrame creations (callback calls)
-
-    // initialize
-    TimeStamp   t(0.0);
-    Vector7s    x; x << 0,0,0, 0,0,0,1;
-    Matrix6s    P = Matrix6s::Identity() * 0.000001;
-    problem->setPrior(x, P, t, dt/2);             // KF1
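-    // setPrior() creates the first key frame (KF1) at pose x with covariance P, stamped at t,
-    // with a time tolerance of dt/2 for associating incoming captures to it.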
-
-    CaptureOdom3DPtr capt_odo = make_shared<CaptureOdom3D>(t, sens_odo, Vector6s::Zero(), P);
-
-    // Track
-    cv::Mat image(intr->height, intr->width, CV_8UC3); // OpenCV cv::Mat(rows, cols)
-    image *= 0; // TODO see if we want to use a real image
-    CaptureImagePtr capt_trk = make_shared<CaptureImage>(t, sens_trk, image);
-    proc_trk->process(capt_trk);
-
-    problem->print(2,0,1,0);
-
-    for (size_t ii=0; ii<8; ii++ )
-    {
-        // Move
-        t = t+dt;
-        WOLF_INFO("----------------------- ts: ", t , " --------------------------");
-
-        capt_odo->setTimeStamp(t);
-        proc_odo->process(capt_odo);
-
-        // Track
-        capt_trk = make_shared<CaptureImage>(t, sens_trk, image);
-        proc_trk->process(capt_trk);
-
-        problem->print(2,0,0,0);
-
-        // Only odom creating KFs
-        ASSERT_TRUE( problem->getLastKeyFrame()->getType().compare("PO 3D")==0 );
-    }
-}
-
-int main(int argc, char **argv)
-{
-    testing::InitGoogleTest(&argc, argv);
-    return RUN_ALL_TESTS();
-}
-