diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..b7c22b8a2031d035c521e955c99912b283210f18
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,4 @@
+.vscode/
+.cproject
+.project
+.settings/language.settings.xml
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 68bf3258a40ab424b645d6ceec9a5fedc87a9315..8072367eaa3197adea46c0e86ab4461ef7b717dc 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required(VERSION 2.8.3)
+cmake_minimum_required(VERSION 3.0.2)
 project(wolf_ros_vision)
 
 ## Compile as C++14
@@ -6,113 +6,32 @@ add_compile_options(-std=c++14)
 # -fPIC and -rdynamic ensure unique singleton instance across shared libraries (for factories) see: https://stackoverflow.com/a/8626922
 SET(CMAKE_CXX_FLAGS "-fPIC -rdynamic")
 
-# SET(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/wolf_ros_wrapper/cmake_modules")
 ## Find catkin macros and libraries
-## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
-## is used, also find other catkin packages
 find_package(catkin REQUIRED COMPONENTS
   roscpp
+  rospy
   sensor_msgs
+  std_msgs
+  tf
+  dynamic_reconfigure
   wolf_ros_node
   cv_bridge
   image_transport
 )
 
+
+
 ## System dependencies are found with CMake's conventions
-# find_package(Boost REQUIRED COMPONENTS system)
-# find_package(Ceres REQUIRED)
-# find_package(Eigen3 REQUIRED)
+find_package(OpenCV REQUIRED)
 find_package(wolfcore REQUIRED)
 find_package(wolfvision REQUIRED)
 
-## Uncomment this if the package has a setup.py. This macro ensures
-## modules and global scripts declared therein get installed
-## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
-# catkin_python_setup()
-
-################################################
-## Declare ROS messages, services and actions ##
-################################################
-
-## To declare and build messages, services or actions from within this
-## package, follow these steps:
-## * Let MSG_DEP_SET be the set of packages whose message types you use in
-##   your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...).
-## * In the file package.xml:
-##   * add a build_depend tag for "message_generation"
-##   * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET
-##   * If MSG_DEP_SET isn't empty the following dependency has been pulled in
-##     but can be declared for certainty nonetheless:
-##     * add a exec_depend tag for "message_runtime"
-## * In this file (CMakeLists.txt):
-##   * add "message_generation" and every package in MSG_DEP_SET to
-##     find_package(catkin REQUIRED COMPONENTS ...)
-##   * add "message_runtime" and every package in MSG_DEP_SET to
-##     catkin_package(CATKIN_DEPENDS ...)
-##   * uncomment the add_*_files sections below as needed
-##     and list every .msg/.srv/.action file to be processed
-##   * uncomment the generate_messages entry below
-##   * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...)
-
-## Generate messages in the 'msg' folder
-# add_message_files(
-#   FILES
-#   Message1.msg
-#   Message2.msg
-# )
-
-## Generate services in the 'srv' folder
-# add_service_files(
-#   FILES
-#   Service1.srv
-#   Service2.srv
-# )
-
-## Generate actions in the 'action' folder
-# add_action_files(
-#   FILES
-#   Action1.action
-#   Action2.action
-# )
-
-## Generate added messages and services with any dependencies listed here
-#generate_messages(
-#  DEPENDENCIES
-#)
-
-################################################
-## Declare ROS dynamic reconfigure parameters ##
-################################################
-
-## To declare and build dynamic reconfigure parameters within this
-## package, follow these steps:
-## * In the file package.xml:
-##   * add a build_depend and a exec_depend tag for "dynamic_reconfigure"
-## * In this file (CMakeLists.txt):
-##   * add "dynamic_reconfigure" to
-##     find_package(catkin REQUIRED COMPONENTS ...)
-##   * uncomment the "generate_dynamic_reconfigure_options" section below
-##     and list every .cfg file to be processed
-
-## Generate dynamic reconfigure parameters in the 'cfg' folder
-#generate_dynamic_reconfigure_options(
-#  cfg/WolfROS.cfg
-#)
-
 ###################################
 ## catkin specific configuration ##
 ###################################
-## The catkin_package macro generates cmake config files for your package
-## Declare things to be passed to dependent projects
-## INCLUDE_DIRS: uncomment this if your package contains header files
-## LIBRARIES: libraries you create in this project that dependent projects also need
-## CATKIN_DEPENDS: catkin_packages dependent projects also need
-## DEPENDS: system dependencies of this project that dependent projects also need
+
 catkin_package(
-#  INCLUDE_DIRS include
-#  LIBRARIES wolf_ros1
-#  CATKIN_DEPENDS roscpp sensor_msgs std_msgs
-#  DEPENDS system_lib
+  CATKIN_DEPENDS rospy sensor_msgs
 )
 
 ###########
@@ -130,91 +49,26 @@ include_directories(
   ${catkin_INCLUDE_DIRS}
   ${CERES_INCLUDE_DIRS}
 )
-# link_directories(/usr/local/lib/iri-algorithms)
-## Declare a C++ library
-add_library(subscriber_${PROJECT_NAME}
-  src/subscriber_camera.cpp)
-#add_library(publisher_${PROJECT_NAME}
-#  src/publisher_camera.cpp)
 
-## Add cmake target dependencies of the library
-## as an example, code may need to be generated before libraries
-## either from message generation or dynamic reconfigure
-# add_dependencies(${PROJECT_NAME}_node ${PROJECT_NAME}_gencfg)
-# add_dependencies(${PROJECT_NAME}_visualizer ${PROJECT_NAME}_gencfg)
-
-## Declare a C++ executable
-## With catkin_make all packages are built within a single CMake context
-## The recommended prefix ensures that target names across packages don't collide
-
-## Rename C++ executable without prefix
-## The above recommended prefix causes long target names, the following renames the
-## target back to the shorter version for ease of user use
-## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg"
-# set_target_properties(${PROJECT_NAME} PROPERTIES OUTPUT_NAME node PREFIX "")
-
-## Add cmake target dependencies of the executable
-## same as for the library above
-#add_dependencies(${PROJECT_NAME}_node ${PROJECT_NAME}_gencfg)
-#add_dependencies(${PROJECT_NAME}_visualizer ${PROJECT_NAME}_gencfg)
+## Declare a C++ library
+add_library(publisher_${PROJECT_NAME} src/publisher_vision.cpp)
+add_library(subscriber_${PROJECT_NAME} src/subscriber_camera.cpp)
 
 ## Specify libraries to link a library or executable target against
 target_link_libraries(subscriber_${PROJECT_NAME}
-            ${wolfcore_LIBRARIES}
-            ${wolfvision_LIBRARIES}
-            ${vision_utils_LIBRARY}
-            ${catkin_LIBRARIES}
-            )
-#target_link_libraries(publisher_${PROJECT_NAME}
-#            ${wolfcore_LIBRARIES}
-#            ${wolfvision_LIBRARIES}
-#            ${sensor_msgs_LIBRARIES}
-#            )
-
-#############
-## Install ##
-#############
-
-# all install targets should use catkin DESTINATION variables
-# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html
-
-## Mark executable scripts (Python etc.) for installation
-## in contrast to setup.py, you can choose the destination
-# install(PROGRAMS
-#   scripts/my_python_script
-#   DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
-# )
-
-## Mark executables and/or libraries for installation
-# install(TARGETS ${PROJECT_NAME} ${PROJECT_NAME}
-#   ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
-#   LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
-#   RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
-# )
-
-## Mark cpp header files for installation
-# install(DIRECTORY include/${PROJECT_NAME}/
-#   DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
-#   FILES_MATCHING PATTERN "*.h"
-#   PATTERN ".svn" EXCLUDE
-# )
-
-## Mark other files for installation (e.g. launch and bag files, etc.)
-# install(FILES
-#   # myfile1
-#   # myfile2
-#   DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
-# )
-
-#############
-## Testing ##
-#############
-
-## Add gtest based cpp test target and link libraries
-# catkin_add_gtest(${PROJECT_NAME}-test test/test_wolf_ros.cpp)
-# if(TARGET ${PROJECT_NAME}-test)
-#   target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
-# endif()
-
-## Add folders to be run by python nosetests
-# catkin_add_nosetests(test)
+                                ${wolfcore_LIBRARIES}
+                                ${wolfvision_LIBRARIES}
+                                ${catkin_LIBRARIES}
+                                ${sensor_msgs_LIBRARIES}
+                                )
+target_link_libraries(publisher_${PROJECT_NAME}
+                                ${wolfcore_LIBRARIES}
+                                ${wolfvision_LIBRARIES}
+                                ${catkin_LIBRARIES}
+                                ${sensor_msgs_LIBRARIES}
+                                )
+
+# Install the camera info publisher Python script
+catkin_install_python(PROGRAMS scripts/publisher_camera_info.py
+  DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
+)
diff --git a/include/publisher_vision.h b/include/publisher_vision.h
new file mode 100644
index 0000000000000000000000000000000000000000..4a1ddb4739b9f5638ffef8554294d670b61d948d
--- /dev/null
+++ b/include/publisher_vision.h
@@ -0,0 +1,191 @@
+//--------LICENSE_START--------
+//
+// Copyright (C) 2020,2021,2022 Institut de Robòtica i Informàtica Industrial, CSIC-UPC.
+// Authors: Joan Solà Ortega (jsola@iri.upc.edu)
+// All rights reserved.
+//
+// This file is part of WOLF
+// WOLF is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with this program.  If not, see <http://www.gnu.org/licenses/>.
+//
+//--------LICENSE_END--------
+#ifndef PUBLISHER_VISION_DEBUG_H_
+#define PUBLISHER_VISION_DEBUG_H_
+
+
+/**************************
+ *      WOLF includes     *
+ **************************/
+#include "core/common/wolf.h"
+#include "vision/processor/processor_visual_odometry.h"
+#include <core/utils/params_server.h>
+
+#include "publisher.h"
+
+/**************************
+ *      ROS includes      *
+ **************************/
+#include <ros/ros.h>
+#include <sensor_msgs/Image.h>
+#include <cv_bridge/cv_bridge.h>
+#include <image_transport/image_transport.h>
+
+
+
+namespace wolf
+{
+
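+// Color palette used to parametrize the debug drawings (converted to a BGR cv::Scalar by primaryColor())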
+enum Color {BLUE = 0,
+            GREEN, 
+            YELLOW, 
+            MAGENTA, 
+            CYAN, 
+            GREY,
+            RED
+};
+class PublisherVisionDebug : public Publisher
+{
+    public:
+        PublisherVisionDebug(const std::string& _unique_name,
+                             const ParamsServer& _server,
+                             const ProblemConstPtr _problem);
+        WOLF_PUBLISHER_CREATE(PublisherVisionDebug);
+
+        virtual ~PublisherVisionDebug(){};
+
+        void initialize(ros::NodeHandle &nh, const std::string& topic) override;
+
+        bool ready() override;
+        void publishDerived() override;
+
+    protected:
+        ProcessorVisualOdometryConstPtr processor_vision_;
+        CaptureBaseConstPtr             last_capture_;
+        std::string                     topic_preprocessor_;
+
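+        // drawing options for a single feature marker: circle thickness, radius in pixels and color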
+        struct Feature {
+            double  thickness_;
+            int     size_pix_;
+            Color   color_;
+        }; 
+        
+        struct Tracks {
+            bool    show_id_;
+            double  size_id_;
+            double  thickness_;
+            bool    min_max_on_alive_tracks_;
+            Color   color_;
+            int     min_luminosity_;
+            int     max_luminosity_;
+            Feature feature_last_;
+            Feature feature_kfs_;
+        };
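+        // drawing options for the tracks produced by the processor's preprocessing step (origin-to-last matches)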
+        struct Tracks_preprocess {
+            bool    show_;
+            bool    show_on_diff_topic_;
+            bool    show_features_;
+            double  thickness_;
+            Color   color_;
+        };
+        struct Landmarks {
+            bool    show_id_;
+            double  size_id_;
+            int     size_pix_;
+            Color   color_;
+        };
+
+        Tracks            tracks_;
+        Tracks_preprocess tracks_preprocess_;
+        Landmarks         landmarks_;
+
+        image_transport::Publisher      publisher_image_;
+        image_transport::Publisher      publisher_image_preprocess_;
+        image_transport::ImageTransport img_transport_;
+
+    private:
+        cv::Scalar primaryColor(Color _color);
+        cv::Scalar colorTrackAndFeatures(int _nb_feature_in_track,
+                                         int _max_feature_in_tracks,
+                                         int _min_feature_in_tracks,
+                                         Color _color_of_features);
+        /*
+            Convert a string corresponding to a color to its corresponding Color enum value
+        */
+        Color                       colorStringToEnum(const std::string _color);
+
+        /*
+            Search for the maximum and minimum of features in one track in the track matrix
+        */
+        std::pair<int, int>         minMaxAliveTrackLength(const TrackMatrix &_track_matrix, CaptureBaseConstPtr _cap_img) const;
+
+        /*
+            Return a unique RGB color depending on the length of the given track:
+            the longer the track, the darker the color
+        */
+        // std::vector<int>            colorTrackAndFeatures(int _nb_feature_in_track, int _max_feature_in_tracks, int _min_feature_in_tracks, Color _color_of_features);
+        
+        /*
+            Return an RGB color corresponding to the requested color
+        */
+        // std::vector<int>            primaryColor(Color _color_of_lmks);
+
+        /*
+            Change an image to show tracks and features within it
+        */
+        void                        showTracks(cv::Mat _image, const TrackMatrix& _track_matrix, CaptureBaseConstPtr _cap_img);
+        
+        /*
+            Return the transformation from world to camera
+        */
+        Eigen::Isometry3d           getTcw               (const CaptureBaseConstPtr _capture) const;
+
+        /*
+            Return Tcw as a 4x4 matrix for multiplication purposes
+        */
+        Eigen::Matrix4d             getTcwMatrix         (const CaptureBaseConstPtr _capture) const;
+        
+        /*
+            Return the projection matrix of a sensor (linked to a capture)
+        */
+        Eigen::Matrix<double, 3, 4> getProjectionMatrix  (const CaptureBaseConstPtr _capture) const;
+
+        /*
+            Return the landmark associated with the feature's track at the most recent keyframe
+        */
+        LandmarkBaseConstPtr        getAssociatedLandmark(const TrackMatrix& _track_matrix, const FeatureBaseConstPtr& _ftr);
+
+        /*
+            Return the projection of a landmark in the frame
+        */
+        Eigen::Vector2d             projectLandmarkHp    (const Eigen::Matrix<double, 3, 4>& _trf_proj_mat, const LandmarkBaseConstPtr _lmk);
+        
+        /*
+            Draw one landmark onto the capture image
+        */
+        void                        showLandmark         (cv::Mat _image, const TrackMatrix& _track_matrix, const FeatureBasePtr& _ftr);
+        
+        /*
+            Draw all of the landmarks associated with the capture
+        */
+        void                        showLandmarks        (cv::Mat _image, const TrackMatrix& _track_matrix, const CaptureBaseConstPtr& _cap);
+
+        /*
+            Change an image to show the tracks and features produced by the preprocessing step
+        */
+        void                        showTracksPreprocess (cv::Mat _image, const CaptureImageConstPtr& _origin, const CaptureImageConstPtr& _last);
+};
+
+WOLF_REGISTER_PUBLISHER(PublisherVisionDebug)
+}
+
+#endif
diff --git a/include/subscriber_camera.h b/include/subscriber_camera.h
index ef9af1e2d3cde2367fe12d0923ebf85312277068..c9e67e378fb611b3e1a49d8d4b8af714792a2a07 100644
--- a/include/subscriber_camera.h
+++ b/include/subscriber_camera.h
@@ -62,6 +62,7 @@ class SubscriberCamera : public Subscriber
         SubscriberCamera(const std::string& _unique_name,
                          const ParamsServer& _server,
                          const SensorBasePtr _sensor_ptr);
+        virtual ~SubscriberCamera(){};
         WOLF_SUBSCRIBER_CREATE(SubscriberCamera);
 
         virtual void initialize(ros::NodeHandle& nh, const std::string& topic);
diff --git a/package.xml b/package.xml
index cb7b3139deb15538582dfefb2378ee701d7cbc29..6f6b5ca5a651e155b8f0f4181db3c683d221f911 100644
--- a/package.xml
+++ b/package.xml
@@ -4,69 +4,33 @@
   <version>0.0.0</version>
   <description>The wolf_ros package of WOLF project</description>
 
-  <!-- One maintainer tag required, multiple allowed, one person per tag -->
-  <!-- Example:  -->
-  <!-- <maintainer email="jane.doe@example.com">Jane Doe</maintainer> -->
   <maintainer email="jvallve@iri.upc.edu">jvallve</maintainer>
 
 
-  <!-- One license tag required, multiple allowed, one license per tag -->
-  <!-- Commonly used license strings: -->
-  <!--   BSD, MIT, Boost Software License, GPLv2, GPLv3, LGPLv2.1, LGPLv3 -->
   <license>TODO</license>
 
-
-  <!-- Url tags are optional, but multiple are allowed, one per tag -->
-  <!-- Optional attribute type can be: website, bugtracker, or repository -->
-  <!-- Example: -->
-  <!-- <url type="website">http://wiki.ros.org/wolf_ros1</url> -->
-
-
-  <!-- Author tags are optional, multiple are allowed, one per tag -->
-  <!-- Authors do not have to be maintainers, but could be -->
-  <!-- Example: -->
-  <!-- <author email="jane.doe@example.com">Jane Doe</author> -->
-
-
-  <!-- The *depend tags are used to specify dependencies -->
-  <!-- Dependencies can be catkin packages or system dependencies -->
-  <!-- Examples: -->
-  <!-- Use depend as a shortcut for packages that are both build and exec dependencies -->
-  <!--   <depend>roscpp</depend> -->
-  <!--   Note that this is equivalent to the following: -->
-  <!--   <build_depend>roscpp</build_depend> -->
-  <!--   <exec_depend>roscpp</exec_depend> -->
-  <!-- Use build_depend for packages you need at compile time: -->
-  <!--   <build_depend>message_generation</build_depend> -->
-  <!-- Use build_export_depend for packages you need in order to build against this package: -->
-  <!--   <build_export_depend>message_generation</build_export_depend> -->
-  <!-- Use buildtool_depend for build tool packages: -->
-  <!--   <buildtool_depend>catkin</buildtool_depend> -->
-  <!-- Use exec_depend for packages you need at runtime: -->
-  <!--   <exec_depend>message_runtime</exec_depend> -->
-  <!-- Use test_depend for packages you need only for testing: -->
-  <!--   <test_depend>gtest</test_depend> -->
-  <!-- Use doc_depend for packages you need only for building documentation: -->
-  <!--   <doc_depend>doxygen</doc_depend> -->
   <buildtool_depend>catkin</buildtool_depend>
+
   <build_depend>roscpp</build_depend>
+  <build_depend>rospy</build_depend>
   <build_depend>sensor_msgs</build_depend>
   <build_depend>std_msgs</build_depend>
   <build_depend>tf</build_depend>
   <build_depend>wolf_ros_node</build_depend>
+
   <build_export_depend>roscpp</build_export_depend>
   <build_export_depend>sensor_msgs</build_export_depend>
   <build_export_depend>std_msgs</build_export_depend>
   <build_export_depend>tf</build_export_depend>
+  
   <exec_depend>roscpp</exec_depend>
+  <exec_depend>rospy</exec_depend>
   <exec_depend>sensor_msgs</exec_depend>
   <exec_depend>std_msgs</exec_depend>
   <exec_depend>tf</exec_depend>
   <exec_depend>wolf_ros_node</exec_depend>
 
-  <!-- The export tag contains other, unspecified, tags -->
   <export>
-    <!-- Other tools can request additional information be placed here -->
 
   </export>
 </package>
diff --git a/scripts/publisher_camera_info.py b/scripts/publisher_camera_info.py
new file mode 100644
index 0000000000000000000000000000000000000000..6a38fd461d9c048e36e42d2a38551e93f90b05ce
--- /dev/null
+++ b/scripts/publisher_camera_info.py
@@ -0,0 +1,81 @@
+#!/usr/bin/env python3
+import sys
+import copy
+import yaml
+import numpy as np
+import rospy
+from sensor_msgs.msg import CameraInfo
+from sensor_msgs.msg import Image
+
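+# Helpers to convert between the flattened 3x3 intrinsic matrix K and the (fx, fy, cx, cy) vector form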
+def kvec2mat(fx, fy, cx, cy):
+    return np.array([
+        fx, 0,  cx,
+        0,  fy, cy,
+        0,  0,  1
+    ]).reshape((3,3))
+    
+    
+def kmat2vec(K):
+    # fx, fy, cx, cy
+    return [K[0,0], K[1,1], K[0,2], K[1,2]]
+    
+
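+# Loads a static CameraInfo from a YAML calibration file and republishes it every time an Image
+# message is received, copying the image header so both messages share the same stamp and frame_id.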
+class CameraInfoPublisher:
+    
+    def __init__(self, topic_image_info_out, camera_yaml_path) -> None:
+        self.topic_image_info_out = topic_image_info_out
+        self.camera_yaml_path = camera_yaml_path
+        self.camera_info_msg = self.read_camera_info(camera_yaml_path)
+        self.publisher = rospy.Publisher(topic_image_info_out, CameraInfo, queue_size=10)
+    
+    def read_camera_info(self, path: str) -> CameraInfo:
+        with open(path, 'r') as f:
+            d = yaml.safe_load(f)
+        
+        # extract the important info and format them
+        return self.extract_info_standard_yaml(d)
+    
+    def extract_info_standard_yaml(self, params: dict) -> CameraInfo:
+        # The yaml file should have the same structure as the ones used for wolf sensor camera
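+        # Illustrative layout of such a file (field names match the accesses below; values are made up):
+        #   width: 640
+        #   height: 480
+        #   camera_matrix:           {data: [458.6, 0.0, 367.2, 0.0, 457.3, 248.4, 0.0, 0.0, 1.0]}
+        #   distortion_model: plumb_bob
+        #   distortion_coefficients: {data: [-0.28, 0.07, 0.0002, 0.00002, 0.0]}
+        #   rectification_matrix:    {data: [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]}
+        #   projection_matrix:       {data: [458.6, 0.0, 367.2, 0.0, 0.0, 457.3, 248.4, 0.0, 0.0, 0.0, 1.0, 0.0]}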
+        
+        w = params['width']
+        h = params['height']
+        Kvec = params['camera_matrix']['data']
+        distortion_model = params['distortion_model']
+        D = params['distortion_coefficients']['data']
+        R = params['rectification_matrix']['data']
+        P = params['projection_matrix']['data']
+        
+        msg_args = {
+            'width': w,
+            'height': h,
+            'K': Kvec,
+            'distortion_model': distortion_model,
+            'D': D,
+            'R': R,
+            'P': P,
+        }
+        
+        return CameraInfo(**msg_args)
+        
+    
+    def publish_cam_info(self, image_msg: Image):
+        # take the header from image msg, replace the camera info header and then publish
+        self.camera_info_msg.header = image_msg.header
+        self.publisher.publish(self.camera_info_msg)
+    
+
+if __name__ == '__main__':
+    # initialize the node first so that parameter names are resolved in its namespace
+    rospy.init_node('camera_info_publisher', anonymous=True)
+
+    camera_yaml_path = rospy.get_param("camera_yaml_path")
+    topic_image_raw_in = rospy.get_param("topic_image_raw_in")
+    topic_image_info_out = rospy.get_param("topic_image_info_out")
+
+    ci_publisher = CameraInfoPublisher(topic_image_info_out, camera_yaml_path)
+    sub = rospy.Subscriber(topic_image_raw_in, Image, ci_publisher.publish_cam_info)
+    rospy.loginfo('Listening on %s', sub.name)
+    
+    rospy.spin()
\ No newline at end of file
diff --git a/src/publisher_vision.cpp b/src/publisher_vision.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..74521824de47c2c733f52c2e4c39342c83fb7c51
--- /dev/null
+++ b/src/publisher_vision.cpp
@@ -0,0 +1,674 @@
+//--------LICENSE_START--------
+//
+// Copyright (C) 2020,2021,2022 Institut de Robòtica i Informàtica Industrial, CSIC-UPC.
+// Authors: Joan Solà Ortega (jsola@iri.upc.edu)
+// All rights reserved.
+//
+// This file is part of WOLF
+// WOLF is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with this program.  If not, see <http://www.gnu.org/licenses/>.
+//
+//--------LICENSE_END--------
+
+#include "publisher_vision.h"
+#include "core/processor/track_matrix.h"
+#include "vision/capture/capture_image.h"
+#include "vision/feature/feature_point_image.h"
+
+#include <image_transport/image_transport.h>
+#include <sensor_msgs/image_encodings.h>
+#include <cv_bridge/cv_bridge.h>
+#include <opencv2/imgproc/imgproc.hpp>
+#include <opencv2/highgui/highgui.hpp>
+#include <ros/ros.h>
+
+#include <vector>
+#include <map>
+#include <iterator>
+#include <limits>
+
+namespace wolf
+{
+
+Color PublisherVisionDebug::colorStringToEnum(const std::string _color)
+{
+    if (_color == "BLUE")
+        return Color::BLUE;
+    else if (_color == "GREEN")
+        return Color::GREEN;
+    else if (_color == "YELLOW")
+        return Color::YELLOW;
+    else if (_color == "MAGENTA")
+        return Color::MAGENTA;
+    else if (_color == "CYAN")
+        return Color::CYAN;
+    else if (_color == "GREY")
+        return Color::GREY;
+    else if (_color == "RED")
+        return Color::RED;
+    else {
+        std::cout << _color << " color not available! Defaulting to CYAN." << std::endl;
+        return Color::CYAN;
+    }
+}
+
+PublisherVisionDebug::PublisherVisionDebug(const std::string &_unique_name,
+                                           const ParamsServer &_server,
+                                           const ProblemConstPtr _problem) :
+        Publisher(_unique_name, _server, _problem),
+        processor_vision_(nullptr),
+        last_capture_(nullptr),
+        img_transport_(ros::NodeHandle())
+{
+    // the name of a processor of type ProcessorVisualOdometry must be provided (checked below)
+    auto processor_name                    = getParamWithDefault<std::string>(_server, prefix_ + "/processor_name", "");
+    topic_preprocessor_                    = getParamWithDefault<std::string>(_server, prefix_ + "/topic_preprocessor", "/debug_image_preprocessor");
+    //Tracks      
+    tracks_.show_id_                       = getParamWithDefault<bool>(_server, prefix_ + "/tracks/show_id", false);
+    tracks_.size_id_                       = getParamWithDefault<double>(_server, prefix_ + "/tracks/size_id", 0.5);
+    tracks_.thickness_                     = getParamWithDefault<double>(_server, prefix_ + "/tracks/thickness", 1.5);
+    tracks_.min_max_on_alive_tracks_       = getParamWithDefault<bool>(_server, prefix_ + "/tracks/min_max_on_alive_tracks", false);
+    std::string str_color_tracks           = getParamWithDefault<std::string>(_server, prefix_ + "/tracks/color", "CYAN");
+    tracks_.color_                         = colorStringToEnum(str_color_tracks);
+    tracks_.min_luminosity_                = getParamWithDefault<int>(_server, prefix_ + "/tracks/min_lum", 100);
+    tracks_.max_luminosity_                = getParamWithDefault<int>(_server, prefix_ + "/tracks/max_lum", 450);
+    tracks_.min_luminosity_                = (tracks_.min_luminosity_ < 0   ? 0   : tracks_.min_luminosity_);
+    tracks_.max_luminosity_                = (tracks_.max_luminosity_ > 510 ? 510 : tracks_.max_luminosity_);
+    //Feature_last      
+    tracks_.feature_last_.thickness_       = getParamWithDefault<double>(_server, prefix_ + "/tracks/feature_last/thickness", -1);
+    tracks_.feature_last_.size_pix_        = getParamWithDefault<int>(_server, prefix_ + "/tracks/feature_last/size_pix", 2);
+    std::string str_color_ftr_last         = getParamWithDefault<std::string>(_server, prefix_ + "/tracks/feature_last/color", "RED");
+    tracks_.feature_last_.color_           = colorStringToEnum(str_color_ftr_last);
+    //Feature_kfs      
+    tracks_.feature_kfs_.thickness_        = getParamWithDefault<double>(_server, prefix_ + "/tracks/feature_kfs/thickness", 0.1);
+    tracks_.feature_kfs_.size_pix_         = getParamWithDefault<int>(_server, prefix_ + "/tracks/feature_kfs/size_pix", 1);
+      
+    //Landmarks      
+    landmarks_.show_id_                    = getParamWithDefault<bool>(_server, prefix_ + "/landmarks/show_id", false);
+    landmarks_.size_id_                    = getParamWithDefault<double>(_server, prefix_ + "/landmarks/size_id", 0.5);
+    landmarks_.size_pix_                   = getParamWithDefault<int>(_server, prefix_ + "/landmarks/size_pix", 5);
+    std::string str_color_landmarks_       = getParamWithDefault<std::string>(_server, prefix_ + "/landmarks/color", "CYAN");
+    landmarks_.color_                      = colorStringToEnum(str_color_landmarks_);
+
+    //Tracks_preprocess
+    tracks_preprocess_.show_               = getParamWithDefault<bool>(_server, prefix_ + "/tracks_preprocess/show", false);
+    tracks_preprocess_.show_on_diff_topic_ = getParamWithDefault<bool>(_server, prefix_ + "/tracks_preprocess/show_on_diff_topic", false);
+    tracks_preprocess_.show_features_      = getParamWithDefault<bool>(_server, prefix_ + "/tracks_preprocess/show_features", false);
+    tracks_preprocess_.thickness_          = getParamWithDefault<double>(_server, prefix_ + "/tracks_preprocess/thickness", 1.5);
+    std::string str_colortracks_preprocess = getParamWithDefault<std::string>(_server, prefix_ + "/tracks_preprocess/color", "CYAN");
+    tracks_preprocess_.color_              = colorStringToEnum(str_colortracks_preprocess);
+
+
+    // search the processor
+    if (processor_name == "")
+        throw std::runtime_error(
+                "PublisherVisionDebug: name of processor of type ProcessorVisualOdometry not provided");
+
+    ProcessorBaseConstPtr proc = problem_->findProcessor(processor_name);
+    if (not proc)
+        throw std::runtime_error(
+                "PublisherVisionDebug: processor " + processor_name + " not found.");
+    processor_vision_ = std::dynamic_pointer_cast<const ProcessorVisualOdometry>(proc);
+    if (not processor_vision_)
+        throw std::runtime_error(
+                "PublisherVisionDebug: processor " + processor_name + " is not of type ProcessorVisualOdometry");
+
+}
+
+void PublisherVisionDebug::initialize(ros::NodeHandle &nh, const std::string &topic)
+{
+    img_transport_              = image_transport::ImageTransport(nh);
+    publisher_image_            = img_transport_.advertise(topic, 10);
+    publisher_image_preprocess_ = img_transport_.advertise(topic_preprocessor_, 10);
+}
+
+bool PublisherVisionDebug::ready()
+{
+    // skip if the same last capture or null last capture
+    if (last_capture_ == processor_vision_->getLast() or not processor_vision_->getLast())
+        return false;
+
+    return true;
+}
+
+void PublisherVisionDebug::publishDerived()
+{
+    if (last_capture_ == processor_vision_->getLast() or not processor_vision_->getLast())
+        return;
+
+    // Get capture image
+    auto cap_img        = std::dynamic_pointer_cast<const CaptureImage>(processor_vision_->getLast());
+    auto cap_img_origin = std::dynamic_pointer_cast<const CaptureImage>(processor_vision_->getOrigin());
+
+    assert(cap_img          != nullptr && "Received Capture is not of type CaptureImage!");
+    assert(cap_img_origin   != nullptr && "Received origin Capture is not of type CaptureImage!");
+
+    auto track_matrix = processor_vision_->getTrackMatrix(); // copy track matrix
+
+    // Draw and publish debug image
+    try
+    {
+        // Extract cv image
+        cv::Mat cv_img_debug;
+        cv::cvtColor(cap_img->getImage(), cv_img_debug, cv::COLOR_GRAY2BGR);
+
+
+        // Draw all tracks
+        showTracks(cv_img_debug, track_matrix, cap_img);
+
+        // Draw preprocessor tracks in the same image
+        if (tracks_preprocess_.show_ && !(tracks_preprocess_.show_on_diff_topic_))
+        {
+            showTracksPreprocess(cv_img_debug, cap_img_origin, cap_img);
+        }
+
+        // Draw all landmarks
+        showLandmarks(cv_img_debug, track_matrix, cap_img);
+
+
+        // Convert to image msg
+        cv_bridge::CvImagePtr cv_msg    = boost::make_shared<cv_bridge::CvImage>();
+        cv_msg->header.stamp.sec        = cap_img->getTimeStamp().getSeconds();
+        cv_msg->header.stamp.nsec       = cap_img->getTimeStamp().getNanoSeconds();
+        cv_msg->encoding                = sensor_msgs::image_encodings::BGR8;
+        cv_msg->image                   = cv_img_debug;
+
+
+        // convert to proper ROS type and publish
+        auto cv_msg_ros = cv_msg->toImageMsg();
+        publisher_image_.publish(cv_msg_ros);
+        
+        if (tracks_preprocess_.show_ && tracks_preprocess_.show_on_diff_topic_)
+        {
+            // Draw preprocessor tracks in a separate image
+            cv::Mat cv_img_debug_preprocess;
+            cv::cvtColor(cap_img->getImage(), cv_img_debug_preprocess, cv::COLOR_GRAY2BGR);
+            showTracksPreprocess(cv_img_debug_preprocess, cap_img_origin, cap_img);
+            cv_bridge::CvImagePtr cv_msg_preprocess = boost::make_shared<cv_bridge::CvImage>();
+            cv_msg_preprocess->header.stamp.sec     = cap_img->getTimeStamp().getSeconds();
+            cv_msg_preprocess->header.stamp.nsec    = cap_img->getTimeStamp().getNanoSeconds();
+            cv_msg_preprocess->encoding             = sensor_msgs::image_encodings::BGR8;
+            cv_msg_preprocess->image                = cv_img_debug_preprocess;
+            auto cv_msg_ros_preprocess              = cv_msg_preprocess->toImageMsg();
+            publisher_image_preprocess_.publish(cv_msg_ros_preprocess);
+        }
+
+        // store to avoid republishing the same capture
+        last_capture_ = processor_vision_->getLast();
+    }
+    catch (const std::exception &e)
+    {
+        std::cerr << "Could not publish debug image... " << e.what() << '\n';
+    }
+}
+
+std::pair<int, int> PublisherVisionDebug::minMaxAliveTrackLength(const TrackMatrix &_track_matrix, CaptureBaseConstPtr _cap_img) const
+{
+
+    SnapshotConst alive_features = _track_matrix.snapshot(_cap_img);
+
+    int max = -1;
+    int min = std::numeric_limits<int>::max();
+
+    for (auto alive_feat : alive_features)
+    {
+        TrackConst track_alive = _track_matrix.track(alive_feat.second->trackId());
+
+        int count = track_alive.size();
+
+        if (count > max)
+        {
+            max = count;
+        }
+        if (count < min)
+        {
+            min = count;
+        }
+    }
+    return std::pair<int, int>(min, max);
+}
+
+
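+// Map the track length to a luminosity in [min_lum, max_lum]: the longer the track, the darker the color.
+// Values above 255 saturate the primary channel and start whitening the other two; the switch below
+// permutes the channels so that the result is expressed in the requested base color.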
+cv::Scalar PublisherVisionDebug::colorTrackAndFeatures(int _nb_feature_in_track,
+                                                       int _max_feature_in_tracks,
+                                                       int _min_feature_in_tracks,
+                                                       Color _color_of_features)
+{
+    int R = 0;
+    int G = 0;
+    int B = 0;
+
+    double alpha;
+
+    if (_max_feature_in_tracks != _min_feature_in_tracks)
+    {
+        alpha = (double)(_nb_feature_in_track - _min_feature_in_tracks)
+                / (double)(_max_feature_in_tracks - _min_feature_in_tracks); // to interval [0,1]
+    }
+    else
+    {
+        alpha = 0; // default to full luminosity
+    }
+
+    double lum = (tracks_.min_luminosity_ + (tracks_.max_luminosity_ - tracks_.min_luminosity_) * (1 - alpha));
+    if (lum > 255)
+    {
+        R = 255;
+        G = round(lum) - 255;
+        B = round(lum) - 255;
+    }
+    else
+    {
+        R = round(lum);
+        G = 0;
+        B = 0;
+    }
+
+    switch (_color_of_features)
+    {
+        case BLUE:
+            B = R;
+            R = G;
+            break;
+        case GREEN:
+            G = R;
+            R = B;
+            break;
+        case YELLOW:
+            G = R;
+            break;
+        case MAGENTA:
+            B = R;
+            break;
+        case CYAN:
+            B = R;
+            R = G;
+            G = B;
+            break;
+        case GREY:
+            B = R;
+            G = R;
+            break;
+        case RED:
+            break;
+        default:
+            break;
+    }
+
+    cv::Scalar color(B,G,R,0);
+
+    return color;
+}
+
+
+cv::Scalar PublisherVisionDebug::primaryColor(Color _color)
+{
+    int R = 0;
+    int G = 0;
+    int B = 0;
+
+    switch (_color)
+    {
+        case BLUE:
+            B = 255;
+            break;
+        case GREEN:
+            G = 255;
+            break;
+        case RED:
+            R = 255;
+            break;
+        case YELLOW:
+            R = 255;
+            G = 255;
+            break;
+        case MAGENTA:
+            R = 255;
+            B = 255;
+            break;
+        case CYAN:
+            G = 255;
+            B = 255;
+            break;
+        case GREY:
+            R = 128;
+            G = 128;
+            B = 128;
+            break;
+        default:
+            break;
+    }
+
+    cv::Scalar color(B,G,R,0);
+
+    return color;
+}
+
+
+
+void PublisherVisionDebug::showTracks(cv::Mat _image,
+                                      const TrackMatrix &_track_matrix,
+                                      CaptureBaseConstPtr _capture_img)
+{
+    std::pair<int, int> min_max_track_length;
+
+    min_max_track_length = minMaxAliveTrackLength(_track_matrix, _capture_img);
+
+    int min_track_length = min_max_track_length.first;
+    int max_track_length = min_max_track_length.second;
+
+    // color of alive feature
+    cv::Scalar color_last_kpt = primaryColor(tracks_.feature_last_.color_);
+
+//    // 1. Draw tracks with their keyframes
+//    for (auto track_id : _track_matrix.trackIds())
+//    {
+//        auto track      = _track_matrix.trackAsVector(track_id);
+//        auto track_kfs  = _track_matrix.trackAtKeyframes(track_id);
+//
+//        // determine color and luminosity of track
+//        cv::Scalar color          = colorTrackAndFeatures(track.size(),
+//                                                          max_feature_in_track,
+//                                                          min_feature_in_track,
+//                                                          tracks_.color_);
+//
+//        // draw track line
+//        for (auto ftr_it = track.rbegin(); ftr_it < track.rend(); ++ftr_it)
+//        {
+//            if (ftr_it == track.rbegin()) continue; // skip first time since prev(rbegin) is undefined
+//
+//            auto ftr_prev_it = std::prev(ftr_it);
+//
+//            // draw line in between keyframes (as well as last KF and current frame)
+//            cv::line(_image,
+//                     cv::Point((*ftr_it)->getMeasurement(0),
+//                               (*ftr_it)->getMeasurement(1)),
+//                     cv::Point((*ftr_prev_it)->getMeasurement(0),
+//                               (*ftr_prev_it)->getMeasurement(1)),
+//                     color,
+//                     tracks_.thickness_);
+//
+//        }
+//
+//        // draw kfs
+//        for (auto pair_ts_ftr : track_kfs)
+//        {
+//            // mark features of previous keyframes
+//            cv::circle(_image,
+//                       cv::Point(pair_ts_ftr.second->getMeasurement(0),
+//                                 pair_ts_ftr.second->getMeasurement(1)),
+//                       tracks_.feature_kfs_.size_pix_,
+//                       color,
+//                       tracks_.feature_kfs_.thickness_);
+//
+//        }
+//    }
+//
+//    // 2. Draw features in last image
+//    for (auto ftr : alive_features)
+//    {
+//        // draw current feature
+//        cv::circle(_image,
+//                   cv::Point(ftr.second->getMeasurement(0),
+//                             ftr.second->getMeasurement(1)),
+//                   tracks_.feature_last_.size_pix_,
+//                   color_last_kpt,
+//                   tracks_.feature_last_.thickness_);
+//
+//        // draw track ID close to current feature
+//        if (tracks_.show_id_) //show features ID of current frame
+//        {
+//            int shift_text_x = 7;
+//            int shift_text_y = 10;
+//            int font_face = 1; // let's keep the default one
+//            cv::putText(
+//                    _image,
+//                    std::to_string(ftr.second->trackId()),
+//                    cv::Point(ftr.second->getMeasurement(0) - shift_text_x,
+//                              ftr.second->getMeasurement(1) - shift_text_y),
+//                    font_face,
+//                    tracks_.size_id_,
+//                    color_last_kpt);
+//        }
+//
+//    }
+
+    // loop for features alive in current capture
+    for (auto ftr_alive : _track_matrix.snapshot(_capture_img))
+    {
+        // get full track of feature
+        TrackConst track = _track_matrix.track(ftr_alive.second->trackId());
+        if (track.size() == 0)
+        {
+            WOLF_WARN("SIZE 0 TRACK!!");
+            continue;
+        }
+
+        // determine color and luminosity of track
+        cv::Scalar color          = colorTrackAndFeatures(track.size(),
+                                                          max_track_length,
+                                                          min_track_length,
+                                                          tracks_.color_);
+
+        // draw current feature
+        cv::circle(_image,
+                   cv::Point(ftr_alive.second->getMeasurement(0),
+                             ftr_alive.second->getMeasurement(1)),
+                   tracks_.feature_last_.size_pix_,
+                   color_last_kpt,
+                   tracks_.feature_last_.thickness_);
+
+        // draw track ID close to current feature
+        if (tracks_.show_id_) //show features ID of current frame
+        {
+            int shift_text_x = 7;
+            int shift_text_y = 10;
+            int font_face = 1; // let's keep the default one
+            cv::putText(
+                    _image,
+                    std::to_string(ftr_alive.second->trackId()),
+                    cv::Point(ftr_alive.second->getMeasurement(0) - shift_text_x,
+                              ftr_alive.second->getMeasurement(1) - shift_text_y),
+                    font_face,
+                    tracks_.size_id_,
+                    color_last_kpt);
+        }
+
+
+        // iterate track from current time to the beginning of track
+        for (TrackConst::reverse_iterator ftr = track.rbegin(); ftr != track.rend(); ++ftr) // start at the feature of the capture to draw
+        {
+            // Check if the feature is associated with a keyframe
+            bool is_keyframe = ftr->second && ftr->second->getCapture() && ftr->second->getCapture()->getFrame()
+                    && ftr->second->getCapture()->getFrame()->getProblem();
+            if (is_keyframe)
+            {
+                // mark features of previous keyframes
+                cv::circle(_image,
+                           cv::Point(ftr->second->getMeasurement(0),
+                                     ftr->second->getMeasurement(1)),
+                           tracks_.feature_kfs_.size_pix_,
+                           color,
+                           tracks_.feature_kfs_.thickness_);
+            }
+
+            // draw line towards the previous (more recent) feature of the track
+            // skip the first iteration: std::prev(track.rbegin()) is not dereferenceable
+            if (ftr != track.rbegin())
+            {
+                auto previous = std::prev(ftr); // previous (more recent) feature iterator
+                cv::line(_image,
+                         cv::Point(ftr->second->getMeasurement(0),
+                                   ftr->second->getMeasurement(1)),
+                         cv::Point(previous->second->getMeasurement(0),
+                                   previous->second->getMeasurement(1)),
+                         color,
+                         tracks_.thickness_);
+            }
+        }
+
+    }
+}
+
+Eigen::Isometry3d PublisherVisionDebug::getTcw(const CaptureBaseConstPtr _capture) const
+{
+    // get robot position and orientation at capture's timestamp
+    const auto& state   = problem_->getState(_capture->getTimeStamp(), "PO");
+    const auto& pos     = state.at('P');
+    const auto& ori     = state.at('O');
+
+    // compute world-to-camera transform
+    Eigen::Isometry3d T_w_r = Translation3d(pos) * Quaterniond(ori.data());
+    Eigen::Isometry3d T_r_c = Translation3d(_capture->getSensorP()->getState())
+            * Quaterniond(_capture->getSensorO()->getState().data());
+
+    // invert transform to camera-to-world and return
+    return (T_w_r * T_r_c).inverse();
+}
+
+Eigen::Matrix4d PublisherVisionDebug::getTcwMatrix(const CaptureBaseConstPtr _capture) const
+{
+    return getTcw(_capture).matrix();
+}
+
+Eigen::Matrix<double, 3, 4> PublisherVisionDebug::getProjectionMatrix(const CaptureBaseConstPtr _capture) const
+{
+    return (std::static_pointer_cast<const SensorCamera>(_capture->getSensor()))->getProjectionMatrix();
+}
+
+LandmarkBaseConstPtr PublisherVisionDebug::getAssociatedLandmark(const TrackMatrix& _track_matrix, const FeatureBaseConstPtr& _ftr)
+{
+    const auto& track_at_kfs      = _track_matrix.trackAtKeyframes(_ftr->trackId());
+
+    if (track_at_kfs.empty())
+        return nullptr;
+
+    const auto& feature_at_last_kf = track_at_kfs.rbegin()->second;
+    const auto& factor_list        = feature_at_last_kf->getFactorList();
+
+    if (factor_list.empty())
+        return nullptr;
+
+    const auto& lmk                = factor_list.front()->getLandmarkOther();
+
+    return lmk;
+}
+
+Eigen::Vector2d PublisherVisionDebug::projectLandmarkHp(const Eigen::Matrix<double, 3, 4>& _trf_proj_mat, const LandmarkBaseConstPtr _lmk)
+{
+    const auto& pos           = _lmk->getP()->getState();
+    Eigen::Vector3d pixel_hmg = _trf_proj_mat * pos;
+
+    Eigen::Vector2d pixel;
+    pixel << pixel_hmg(0)/pixel_hmg(2),
+            pixel_hmg(1)/pixel_hmg(2);
+
+    return pixel;
+}
+
+
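+// Project the landmark associated with each feature alive in _capture into the image, using the camera
+// projection matrix and the state estimated at the capture's timestamp, and draw a tilted cross (and
+// optionally the landmark id) at the projected pixel.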
+void PublisherVisionDebug::showLandmarks(cv::Mat                _image,
+                                         const TrackMatrix&     _track_matrix,
+                                         const CaptureBaseConstPtr&  _capture)
+{
+    // get stuff common to all landmarks
+    cv::Scalar color_lmks = primaryColor(landmarks_.color_);
+
+    const auto& Tcw         = getTcwMatrix(_capture);
+    const auto& prj_mat     = getProjectionMatrix(_capture);
+    const auto& trf_prj_mat = prj_mat * Tcw;
+
+    // draw one landmark for each feature
+    const auto& ftrs_alive = _track_matrix.snapshotAsList(_capture);
+
+    if (ftrs_alive.empty()) return;
+
+    for (const auto& ftr: ftrs_alive)
+    {
+        // get stuff of this landmark
+        const auto& lmk         = getAssociatedLandmark(_track_matrix, ftr);
+
+        if (lmk != nullptr)
+        {
+            const auto& lmk_pixel   = projectLandmarkHp(trf_prj_mat, lmk);
+
+            cv::drawMarker(_image,
+                           cv::Point(lmk_pixel(0),
+                                     lmk_pixel(1)),
+                           color_lmks, 
+                           cv::MARKER_TILTED_CROSS,
+                           landmarks_.size_pix_, 1);
+
+            if (landmarks_.show_id_)
+            {
+                unsigned int lmk_id = lmk->id();
+                int shift_text_x = 7;
+                int shift_text_y = 10;
+                int font_face = 1; // let's keep the default one
+
+                cv::putText(_image,
+                            std::to_string(lmk_id),
+                            cv::Point(lmk_pixel(0) - shift_text_x,
+                                      lmk_pixel(1) - shift_text_y),
+                            font_face, 
+                            landmarks_.size_id_, 
+                            color_lmks);
+            }
+        }
+
+    }
+}
+
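+// Draw the raw tracks stored by the preprocessing step: a line from each keypoint in the origin capture
+// to its match in the last capture, optionally marking both endpoints with circles.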
+void PublisherVisionDebug::showTracksPreprocess(cv::Mat _image,
+                                                const CaptureImageConstPtr& _origin,
+                                                const CaptureImageConstPtr& _last)
+{
+    cv::Scalar color_track_preprocess = primaryColor(tracks_preprocess_.color_);
+
+    const auto& tracks_origin               = _last->getTracksOrigin();
+    const auto& kps_last                    = _last->getKeyPoints();
+    const auto& kps_origin                  = _origin->getKeyPoints();
+
+    for(auto it = tracks_origin.begin(); it != tracks_origin.end(); it++)
+    {
+
+        const auto& it_ftr_origin    = kps_origin.find(it->first);
+        const auto& it_ftr_last      = kps_last.find(it->second);
+
+        if (it_ftr_origin != kps_origin.end() && it_ftr_last != kps_last.end())
+        {
+            const auto& ftr_origin     = it_ftr_origin->second.getCvKeyPoint();
+            const auto& ftr_last       = it_ftr_last->second.getCvKeyPoint();
+
+            if (tracks_preprocess_.show_features_)
+            {
+                cv::circle(_image,
+                           ftr_origin.pt,
+                           tracks_.feature_last_.size_pix_,
+                           color_track_preprocess,
+                           tracks_.feature_last_.thickness_);
+
+                cv::circle(_image,
+                           ftr_last.pt,
+                           tracks_.feature_kfs_.size_pix_,
+                           color_track_preprocess,
+                           tracks_.feature_kfs_.thickness_);
+            }
+
+            cv::line(_image,
+                     ftr_origin.pt,
+                     ftr_last.pt,
+                     color_track_preprocess,
+                     tracks_preprocess_.thickness_);
+        }
+    }
+}
+
+
+}