diff --git a/.gitignore b/.gitignore
index b1406042b7bdbb1ddb7812463a34adf9ac74b21e..717e61aaa791d19ad871dda9b85a9350cc64a939 100644
--- a/.gitignore
+++ b/.gitignore
@@ -31,5 +31,4 @@ src/CMakeFiles/cmake.check_cache
 vision.found
 
 \.vscode/
-build_release/
-
+doc/html
diff --git a/CMakeLists.txt b/CMakeLists.txt
index c157c0c0db4b721b741312fb3c75b30b4d13cc71..86e5e79bfbeca59453a0bc25e04f96439d7e6acd 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -83,7 +83,7 @@ option(_WOLF_TRACE "Enable wolf tracing macro" ON)
 
 # ============ DEPENDENCIES ==================
 FIND_PACKAGE(wolfcore REQUIRED)
-# SET(PRINT_INFO_VU false)
+SET(PRINT_INFO_VU false)
 FIND_PACKAGE(OpenCV REQUIRED)
 
 # ============ config.h ==================
@@ -115,16 +115,16 @@ SET(HDRS_CAPTURE
 include/vision/capture/capture_image.h
   )
 SET(HDRS_FACTOR
-include/vision/factor/factor_trifocal.h
-include/vision/factor/factor_ahp.h
+# include/vision/factor/factor_trifocal.h
+# include/vision/factor/factor_ahp.h
 include/vision/factor/factor_pixel_hp.h
-include/vision/factor/factor_epipolar.h
+# include/vision/factor/factor_epipolar.h
   )
 SET(HDRS_FEATURE
 include/vision/feature/feature_point_image.h
   )
 SET(HDRS_LANDMARK
-  include/vision/landmark/landmark_ahp.h
+  # include/vision/landmark/landmark_ahp.h
   include/vision/landmark/landmark_hp.h
   include/vision/landmark/landmark_point_3d.h
   )
@@ -132,11 +132,8 @@ SET(HDRS_MATH
 include/vision/math/pinhole_tools.h
   )
 SET(HDRS_PROCESSOR
-include/vision/processor/processor_tracker_feature_trifocal.h
-include/vision/processor/processor_params_image.h
-include/vision/processor/processor_tracker_landmark_image.h
-include/vision/processor/processor_tracker_feature_image.h
-include/vision/processor/processor_bundle_adjustment.h
+include/vision/processor/processor_visual_odometry.h
+include/vision/processor/active_search.h
   )
 SET(HDRS_SENSOR
 include/vision/sensor/sensor_camera.h
@@ -150,25 +147,19 @@ SET(SRCS_FEATURE
 src/feature/feature_point_image.cpp
   )
 SET(SRCS_LANDMARK
-src/landmark/landmark_ahp.cpp
+# src/landmark/landmark_ahp.cpp
 src/landmark/landmark_hp.cpp
 src/landmark/landmark_point_3d.cpp
   )
 SET(SRCS_PROCESSOR
-src/processor/processor_tracker_feature_trifocal.cpp
-src/processor/processor_tracker_feature_image.cpp
-src/processor/processor_tracker_landmark_image.cpp
-src/processor/processor_bundle_adjustment.cpp
+src/processor/processor_visual_odometry.cpp
+src/processor/active_search.cpp
   )
 SET(SRCS_SENSOR
 src/sensor/sensor_camera.cpp
   )
 SET(SRCS_YAML
-src/yaml/processor_tracker_feature_trifocal_yaml.cpp
-src/yaml/sensor_camera_yaml.cpp
-src/yaml/processor_image_yaml.cpp
-src/yaml/processor_bundle_adjustment_yaml.cpp
-  )
+)
 
 # create the shared library
 ADD_LIBRARY(${PLUGIN_NAME}
@@ -196,12 +187,7 @@ endif()
 #Link the created libraries
 #===============EXAMPLE=========================
 TARGET_LINK_LIBRARIES(${PLUGIN_NAME} wolfcore)
-<<<<<<< Updated upstream
-TARGET_LINK_LIBRARIES(${PLUGIN_NAME} OpenCV)
-# TARGET_LINK_LIBRARIES(${PLUGIN_NAME} ${vision_utils_LIBRARY})
-=======
-TARGET_LINK_LIBRARIES(${PLUGIN_NAME} ${OpenCV_LIBRARIES})
->>>>>>> Stashed changes
+TARGET_LINK_LIBRARIES(${PLUGIN_NAME} ${OpenCV_LIBRARIES})
 
 #Build demos
 #===============EXAMPLE=========================
diff --git a/ROS-based code style eclipse indented 4sp.xml b/ROS-based code style eclipse indented 4sp.xml
deleted file mode 100644
index 73a372adf3fedd1fd54cf6889cfe0f570ef0e0c4..0000000000000000000000000000000000000000
--- a/ROS-based code style eclipse indented 4sp.xml	
+++ /dev/null
@@ -1,167 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<profiles version="1">
-<profile kind="CodeFormatterProfile" name="ROS Formatting indented private" version="1">
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_opening_paren_in_method_declaration" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_after_opening_paren_in_for" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_new_line_in_empty_block" value="insert"/>
-<setting id="org.eclipse.cdt.core.formatter.lineSplit" value="120"/>
-<setting id="org.eclipse.cdt.core.formatter.alignment_for_member_access" value="0"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_comma_in_base_types" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.keep_else_statement_on_same_line" value="false"/>
-<setting id="org.eclipse.cdt.core.formatter.indent_switchstatements_compare_to_switch" value="true"/>
-<setting id="org.eclipse.cdt.core.formatter.alignment_for_constructor_initializer_list" value="2"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_after_opening_brace_in_array_initializer" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_comma_in_method_declaration_parameters" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_after_opening_paren_in_if" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_closing_paren_in_parenthesized_expression" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_after_opening_paren_in_exception_specification" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_after_comma_in_base_types" value="insert"/>
-<setting id="org.eclipse.cdt.core.formatter.indent_body_declarations_compare_to_access_specifier" value="true"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_closing_paren_in_exception_specification" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_after_comma_in_template_arguments" value="insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_opening_brace_in_block" value="insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_closing_paren_in_method_declaration" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.use_tabs_only_for_leading_indentations" value="false"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_colon_in_labeled_statement" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_after_colon_in_case" value="insert"/>
-<setting id="org.eclipse.cdt.core.formatter.comment.min_distance_between_code_and_line_comment" value="1"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_after_comma_in_array_initializer" value="insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_after_comma_in_enum_declarations" value="insert"/>
-<setting id="org.eclipse.cdt.core.formatter.alignment_for_expressions_in_array_initializer" value="18"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_after_comma_in_declarator_list" value="insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_opening_bracket" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_closing_paren_in_for" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_prefix_operator" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.tabulation.size" value="4"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_new_line_before_else_in_if_statement" value="insert"/>
-<setting id="org.eclipse.cdt.core.formatter.alignment_for_enumerator_list" value="16"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_after_opening_paren_in_parenthesized_expression" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_between_empty_parens_in_method_declaration" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_closing_paren_in_switch" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.alignment_for_declarator_list" value="16"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_opening_paren_in_parenthesized_expression" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.indent_empty_lines" value="false"/>
-<setting id="org.eclipse.cdt.core.formatter.indent_switchstatements_compare_to_cases" value="true"/>
-<setting id="org.eclipse.cdt.core.formatter.keep_empty_array_initializer_on_one_line" value="false"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_opening_brace_in_method_declaration" value="insert"/>
-<setting id="org.eclipse.cdt.core.formatter.put_empty_statement_on_new_line" value="true"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_opening_brace_in_switch" value="insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_closing_paren_in_cast" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_between_empty_braces_in_array_initializer" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.brace_position_for_method_declaration" value="next_line"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_closing_paren_in_while" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_after_question_in_conditional" value="insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_semicolon" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_after_closing_angle_bracket_in_template_arguments" value="insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_colon_in_base_clause" value="insert"/>
-<setting id="org.eclipse.cdt.core.formatter.indent_breaks_compare_to_cases" value="true"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_unary_operator" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.join_wrapped_lines" value="true"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_comma_in_declarator_list" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.alignment_for_arguments_in_method_invocation" value="18"/>
-<setting id="org.eclipse.cdt.core.formatter.comment.never_indent_line_comments_on_first_column" value="true"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_opening_paren_in_while" value="insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_between_empty_brackets" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_after_opening_bracket" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.alignment_for_parameters_in_method_declaration" value="18"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_new_line_before_closing_brace_in_array_initializer" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.number_of_empty_lines_to_preserve" value="1"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_after_opening_paren_in_method_invocation" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_closing_brace_in_array_initializer" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_semicolon_in_for" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_colon_in_conditional" value="insert"/>
-<setting id="org.eclipse.cdt.core.formatter.comment.preserve_white_space_between_code_and_line_comments" value="true"/>
-<setting id="org.eclipse.cdt.core.formatter.brace_position_for_block" value="next_line"/>
-<setting id="org.eclipse.cdt.core.formatter.brace_position_for_type_declaration" value="next_line"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_assignment_operator" value="insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_opening_angle_bracket_in_template_arguments" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_comma_in_expression_list" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_after_opening_angle_bracket_in_template_parameters" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.continuation_indentation" value="2"/>
-<setting id="org.eclipse.cdt.core.formatter.alignment_for_expression_list" value="0"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_after_opening_paren_in_method_declaration" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_comma_in_template_parameters" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_colon_in_default" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_after_binary_operator" value="insert"/>
-<setting id="org.eclipse.cdt.core.formatter.alignment_for_conditional_expression" value="16"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_between_empty_parens_in_method_invocation" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_comma_in_array_initializer" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_closing_paren_in_if" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.format_guardian_clause_on_one_line" value="false"/>
-<setting id="org.eclipse.cdt.core.formatter.indent_access_specifier_extra_spaces" value="0"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_after_opening_paren_in_cast" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.indent_access_specifier_compare_to_type_header" value="true"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_opening_brace_in_type_declaration" value="insert"/>
-<setting id="org.eclipse.cdt.core.formatter.continuation_indentation_for_array_initializer" value="2"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_after_colon_in_labeled_statement" value="insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_after_comma_in_method_declaration_parameters" value="insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_after_semicolon_in_for" value="insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_closing_paren_in_method_invocation" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.indent_body_declarations_compare_to_namespace_header" value="false"/>
-<setting id="org.eclipse.cdt.core.formatter.alignment_for_compact_if" value="16"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_after_assignment_operator" value="insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_after_closing_brace_in_block" value="insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_opening_brace_in_array_initializer" value="insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_new_line_at_end_of_file_if_missing" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.alignment_for_assignment" value="16"/>
-<setting id="org.eclipse.cdt.core.formatter.alignment_for_conditional_expression_chain" value="18"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_after_comma_in_template_parameters" value="insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_after_comma_in_expression_list" value="insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_question_in_conditional" value="insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_opening_paren_in_exception_specification" value="insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_binary_operator" value="insert"/>
-<setting id="org.eclipse.cdt.core.formatter.alignment_for_base_clause_in_type_declaration" value="18"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_new_line_before_identifier_in_function_declaration" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_comma_in_method_declaration_throws" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_between_empty_parens_in_exception_specification" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_comma_in_method_invocation_arguments" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.indent_declaration_compare_to_template_header" value="false"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_after_unary_operator" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_opening_paren_in_switch" value="insert"/>
-<setting id="org.eclipse.cdt.core.formatter.indent_statements_compare_to_body" value="true"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_after_comma_in_method_declaration_throws" value="insert"/>
-<setting id="org.eclipse.cdt.core.formatter.alignment_for_binary_expression" value="16"/>
-<setting id="org.eclipse.cdt.core.formatter.indent_statements_compare_to_block" value="true"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_comma_in_template_arguments" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_new_line_before_catch_in_try_statement" value="insert"/>
-<setting id="org.eclipse.cdt.core.formatter.alignment_for_throws_clause_in_method_declaration" value="18"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_opening_paren_in_method_invocation" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_closing_paren_in_catch" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_after_closing_paren_in_cast" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_opening_angle_bracket_in_template_parameters" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.tabulation.char" value="space"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_closing_angle_bracket_in_template_parameters" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_new_line_before_colon_in_constructor_initializer_list" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_after_opening_paren_in_while" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_after_comma_in_method_invocation_arguments" value="insert"/>
-<setting id="org.eclipse.cdt.core.formatter.brace_position_for_block_in_case" value="next_line"/>
-<setting id="org.eclipse.cdt.core.formatter.compact_else_if" value="true"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_after_postfix_operator" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_new_line_after_template_declaration" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_after_colon_in_base_clause" value="insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_after_opening_paren_in_catch" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.keep_then_statement_on_same_line" value="false"/>
-<setting id="org.eclipse.cdt.core.formatter.brace_position_for_switch" value="next_line"/>
-<setting id="org.eclipse.cdt.core.formatter.alignment_for_overloaded_left_shift_chain" value="16"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_opening_paren_in_if" value="insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_after_opening_paren_in_switch" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.keep_imple_if_on_one_line" value="false"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_new_line_after_opening_brace_in_array_initializer" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.indentation.size" value="4"/>
-<setting id="org.eclipse.cdt.core.formatter.brace_position_for_namespace_declaration" value="next_line"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_after_colon_in_conditional" value="insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_comma_in_enum_declarations" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_after_prefix_operator" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_closing_angle_bracket_in_template_arguments" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.brace_position_for_array_initializer" value="end_of_line"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_colon_in_case" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_opening_paren_in_catch" value="insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_opening_brace_in_namespace_declaration" value="insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_postfix_operator" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_closing_bracket" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_new_line_before_while_in_do_statement" value="do not insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_before_opening_paren_in_for" value="insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_after_closing_angle_bracket_in_template_parameters" value="insert"/>
-<setting id="org.eclipse.cdt.core.formatter.insert_space_after_opening_angle_bracket_in_template_arguments" value="do not insert"/>
-</profile>
-</profiles>
diff --git a/ROS-based wolf code style QT indented 4sp.xml b/ROS-based wolf code style QT indented 4sp.xml
deleted file mode 100644
index 7ef65eaad10d85b649675af569f07ed7f463792a..0000000000000000000000000000000000000000
--- a/ROS-based wolf code style QT indented 4sp.xml	
+++ /dev/null
@@ -1,39 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE QtCreatorCodeStyle>
-<!-- Written by QtCreator 3.0.1, 2016-02-22T12:02:02. -->
-<qtcreator>
- <data>
-  <variable>CodeStyleData</variable>
-  <valuemap type="QVariantMap">
-   <value type="bool" key="AlignAssignments">false</value>
-   <value type="bool" key="AutoSpacesForTabs">false</value>
-   <value type="bool" key="BindStarToIdentifier">false</value>
-   <value type="bool" key="BindStarToLeftSpecifier">false</value>
-   <value type="bool" key="BindStarToRightSpecifier">false</value>
-   <value type="bool" key="BindStarToTypeName">true</value>
-   <value type="bool" key="ExtraPaddingForConditionsIfConfusingAlign">true</value>
-   <value type="bool" key="IndentAccessSpecifiers">true</value>
-   <value type="bool" key="IndentBlockBody">true</value>
-   <value type="bool" key="IndentBlockBraces">false</value>
-   <value type="bool" key="IndentBlocksRelativeToSwitchLabels">true</value>
-   <value type="bool" key="IndentClassBraces">false</value>
-   <value type="bool" key="IndentControlFlowRelativeToSwitchLabels">true</value>
-   <value type="bool" key="IndentDeclarationsRelativeToAccessSpecifiers">true</value>
-   <value type="bool" key="IndentEnumBraces">false</value>
-   <value type="bool" key="IndentFunctionBody">true</value>
-   <value type="bool" key="IndentFunctionBraces">false</value>
-   <value type="bool" key="IndentNamespaceBody">false</value>
-   <value type="bool" key="IndentNamespaceBraces">false</value>
-   <value type="int" key="IndentSize">4</value>
-   <value type="bool" key="IndentStatementsRelativeToSwitchLabels">true</value>
-   <value type="bool" key="IndentSwitchLabels">true</value>
-   <value type="int" key="PaddingMode">2</value>
-   <value type="bool" key="SpacesForTabs">true</value>
-   <value type="int" key="TabSize">4</value>
-  </valuemap>
- </data>
- <data>
-  <variable>DisplayName</variable>
-  <value type="QString">ROS wolf</value>
- </data>
-</qtcreator>
diff --git a/demos/CMakeLists.txt b/demos/CMakeLists.txt
index 08e1123d93c87d54d9d9208ca353da58c0409676..4589b079c211bac6c29d6f20535e22d118100af5 100644
--- a/demos/CMakeLists.txt
+++ b/demos/CMakeLists.txt
@@ -1,23 +1,2 @@
-<<<<<<< Updated upstream
-
-if (OpenCV_FOUND) 
-  if (${OpenCV_VERSION_MAJOR} GREATER 1)
-    message("-- [INFO] Found OpenCV support")
-    ADD_DEFINITIONS(-DHAVE_OPENCV_H)
-    SET(USE_CV true)
-  else(${OpenCV_VERSION_MAJOR} GREATER 1)
-    message("[WARN] OpenCV support not installed. Minimum 2.4 version required.")
-    message("[WARN] Current version ${OpenCV_VERSION_MAJOR}")    
-  endif(${OpenCV_VERSION_MAJOR} GREATER 1)
-else(OpenCV_FOUND)
-    message("[WARN] OpenCV support not installed. Minimum 2.4 version required.")
-endif(OpenCV_FOUND)
-
-ADD_EXECUTABLE(demo_processor_bundle_adjustment demo_processor_bundle_adjustment.cpp)
-TARGET_LINK_LIBRARIES(demo_processor_bundle_adjustment ${PLUGIN_NAME} ${OpenCV_LIBS})
-=======
 ADD_EXECUTABLE(demo_visual_odometry demo_visual_odometry.cpp)
-TARGET_LINK_LIBRARIES(demo_visual_odometry ${PLUGIN_NAME} ${OpenCV_LIBS})
->>>>>>> Stashed changes
-
-
+TARGET_LINK_LIBRARIES(demo_visual_odometry ${PLUGIN_NAME} ${OpenCV_LIBS})
diff --git a/demos/calib_logitech_webcam_params.yaml b/demos/calib_logitech_webcam_params.yaml
deleted file mode 100644
index 1285f904ca31fc446c5b2b07cbe0e22ba600f4e8..0000000000000000000000000000000000000000
--- a/demos/calib_logitech_webcam_params.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-image_width: 640
-image_height: 480
-camera_name: narrow_stereo
-camera_matrix:
-  rows: 3
-  cols: 3
-  data: [942.816482, 0.000000, 334.175643, 0.000000, 897.287666, 309.904004, 0.000000, 0.000000, 1.000000]
-distortion_model: plumb_bob
-distortion_coefficients:
-  rows: 1
-  cols: 5
-  data: [0.159958, -0.578834, 0.000000, 0.000000, 0.000000]
-rectification_matrix:
-  rows: 3
-  cols: 3
-  data: [1.000000, 0.000000, 0.000000, 0.000000, 1.000000, 0.000000, 0.000000, 0.000000, 1.000000]
-projection_matrix:
-  rows: 3
-  cols: 4
-  data: [953.235413, 0.000000, 334.175675, 0.000000, 0.000000, 907.202332, 309.904238, 0.000000, 0.000000, 0.000000, 1.000000, 0.000000]
diff --git a/demos/camera_euroc_mav0_notang.yaml b/demos/camera_euroc_mav0_notang.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ace83297fb7c2ebca132d0f116ee46ecb4f0b18e
--- /dev/null
+++ b/demos/camera_euroc_mav0_notang.yaml
@@ -0,0 +1,26 @@
+width: 752
+height: 480
+camera_name: narrow_stereo
+camera_matrix:
+  rows: 3
+  cols: 3
+  data: [458, 0.000000, 367.215,
+         0.000000, 457.296, 248.375,
+         0.000000, 0.000000, 1.000000]
+distortion_model: plumb_bob
+distortion_coefficients:
+  rows: 1
+  cols: 5
+  data: [-0.28340811, 0.07395907, 0.00019359, 0.0, 0.000000]
+rectification_matrix:
+  rows: 3
+  cols: 3
+  data: [1.000000, 0.000000, 0.000000,
+         0.000000, 1.000000, 0.000000,
+         0.000000, 0.000000, 1.000000]
+projection_matrix:
+  rows: 3
+  cols: 4
+  data: [355.63, 0.000000, 362.27, 0.000000,
+         0.000000, 417.16, 249.65, 0.000000,
+         0.000000, 0.000000, 1.000000, 0.000000]
diff --git a/demos/demo_ORB.png b/demos/demo_ORB.png
deleted file mode 100644
index 016141f5309c1ed34a61d71cfa63b130ea90aa8f..0000000000000000000000000000000000000000
Binary files a/demos/demo_ORB.png and /dev/null differ
diff --git a/demos/demo_factor_AHP.cpp b/demos/demo_factor_AHP.cpp
deleted file mode 100644
index eb8f843cba6e4e3a66c351800059ea1da590d90a..0000000000000000000000000000000000000000
--- a/demos/demo_factor_AHP.cpp
+++ /dev/null
@@ -1,169 +0,0 @@
-//--------LICENSE_START--------
-//
-// Copyright (C) 2020,2021,2022 Institut de Robòtica i Informàtica Industrial, CSIC-UPC.
-// Authors: Joan Solà Ortega (jsola@iri.upc.edu)
-// All rights reserved.
-//
-// This file is part of WOLF
-// WOLF is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with this program.  If not, see <http://www.gnu.org/licenses/>.
-//
-//--------LICENSE_END--------
-#include "core/math/pinhole_tools.h"
-#include "core/landmark/landmark_AHP.h"
-#include "core/factor/factor_AHP.h"
-#include "core/state_block/state_block.h"
-#include "core/state_block/state_quaternion.h"
-#include "core/sensor/sensor_camera.h"
-#include "core/capture/capture_image.h"
-
-int main()
-{
-    using namespace wolf;
-
-    std::cout << std::endl << "==================== factor AHP test ======================" << std::endl;
-
-    TimeStamp t = 1;
-
-    //=====================================================
-    // Environment variable for configuration files
-    std::string wolf_root = _WOLF_ROOT_DIR;
-    std::cout << wolf_root << std::endl;
-    //=====================================================
-
-    // Wolf problem
-    ProblemPtr wolf_problem_ptr_ = Problem::create("PO", 3);
-
-    /* Do this while there aren't extrinsic parameters on the yaml */
-    Eigen::Vector7d extrinsic_cam;
-    extrinsic_cam.setRandom();
-//    extrinsic_cam[0] = 0; //px
-//    extrinsic_cam[1] = 0; //py
-//    extrinsic_cam[2] = 0; //pz
-//    extrinsic_cam[3] = 0; //qx
-//    extrinsic_cam[4] = 0; //qy
-//    extrinsic_cam[5] = 0; //qz
-//    extrinsic_cam[6] = 1; //qw
-    extrinsic_cam.tail<4>().normalize();
-    std::cout << "========extrinsic_cam: " << extrinsic_cam.transpose() << std::endl;
-    const Eigen::VectorXd extr = extrinsic_cam;
-    /* Do this while there aren't extrinsic parameters on the yaml */
-
-    SensorBasePtr sensor_ptr = wolf_problem_ptr_->installSensor("CAMERA", "PinHole", extr, wolf_root + "/src/examples/camera_params_ueye_radial_dist.yaml");
-    SensorCameraPtr camera_ptr_ = std::static_pointer_cast<SensorCamera>(sensor_ptr);
-
-    // PROCESSOR
-    // one-liner API
-    ProcessorBasePtr processor_ptr = wolf_problem_ptr_->installProcessor("IMAGE LANDMARK", "ORB", "PinHole", wolf_root + "/src/examples/processor_image_feature.yaml");
-
-    // create the current frame
-    Eigen::Vector7d frame_pos_ori;
-    frame_pos_ori.setRandom();
-//    frame_pos_ori[0] = 0; //px
-//    frame_pos_ori[1] = 0; //py
-//    frame_pos_ori[2] = 0; //pz
-//    frame_pos_ori[3] = 0; //qx
-//    frame_pos_ori[4] = 0; //qy
-//    frame_pos_ori[5] = 0; //qz
-//    frame_pos_ori[6] = 1; //qw
-    frame_pos_ori.tail<4>().normalize();
-    const Eigen::VectorXd frame_val = frame_pos_ori;
-
-    FrameBasePtr last_frame = std::make_shared<FrameBase>(t,std::make_shared<StateBlock>(frame_val.head(3)), std::make_shared<StateQuaternion>(frame_val.tail(4)));
-    std::cout << "Last frame" << std::endl;
-    wolf_problem_ptr_->getTrajectory()->addFrame(last_frame);
-
-    // Capture
-    CaptureImagePtr image_ptr;
-    t.setToNow();
-    cv::Mat frame; //puede que necesite una imagen
-
-    image_ptr = std::make_shared< CaptureImage>(t, camera_ptr_, frame);
-    last_frame->addCapture(image_ptr);
-
-    // create the feature
-    cv::KeyPoint kp; kp.pt = {10,20};
-    cv::Mat desc;
-
-    FeaturePointImagePtr feat_point_image_ptr = std::make_shared<FeaturePointImage>(kp, 0, desc, Eigen::Matrix2d::Identity());
-    image_ptr->addFeature(feat_point_image_ptr);
-
-    FrameBasePtr anchor_frame = std::make_shared< FrameBase>(t,std::make_shared<StateBlock>(frame_val.head(3)), std::make_shared<StateQuaternion>(frame_val.tail(4)));
-    //FrameBasePtr anchor_frame = wolf_problem_ptr_->getTrajectory()->getLastFrame();
-
-    // create the landmark
-    Eigen::Vector2d point2D;
-    point2D[0] = feat_point_image_ptr->getKeypoint().pt.x;
-    point2D[1] = feat_point_image_ptr->getKeypoint().pt.y;
-    std::cout << "point2D: " << point2D.transpose() << std::endl;
-
-    double distance = 2; // arbitrary value
-    Eigen::Vector4d vec_homogeneous;
-
-    Eigen::VectorXd correction_vec = (std::static_pointer_cast<SensorCamera>(image_ptr->getSensor()))->getCorrectionVector();
-    std::cout << "correction vector: " << correction_vec << std::endl;
-    std::cout << "distortion vector: " << (std::static_pointer_cast<SensorCamera>(image_ptr->getSensor()))->getDistortionVector() << std::endl;
-    point2D = pinhole::depixellizePoint(image_ptr->getSensor()->getIntrinsic()->getState(),point2D);
-    std::cout << "point2D depixellized: " << point2D.transpose() << std::endl;
-    point2D = pinhole::undistortPoint((std::static_pointer_cast<SensorCamera>(image_ptr->getSensor()))->getCorrectionVector(),point2D);
-    std::cout << "point2D undistorted: " << point2D.transpose() << std::endl;
-
-    Eigen::Vector3d point3D;
-    point3D.head(2) = point2D;
-    point3D(2) = 1;
-
-    point3D.normalize();
-    std::cout << "point3D normalized: " << point3D.transpose() << std::endl;
-
-    vec_homogeneous = {point3D(0),point3D(1),point3D(2),1/distance};
-    std::cout << "vec_homogeneous: " << vec_homogeneous.transpose() << std::endl;
-
-    LandmarkAhpPtr landmark = std::make_shared<LandmarkAhp>(vec_homogeneous, anchor_frame, image_ptr->getSensor(), feat_point_image_ptr->getDescriptor());
-
-    std::cout << "Landmark AHP created" << std::endl;
-
-    // Create the factor
-    FactorAhpPtr factor_ptr = std::make_shared<FactorAhp>(feat_point_image_ptr, std::static_pointer_cast<LandmarkAhp>(landmark), processor_ptr);
-
-    feat_point_image_ptr->addFactor(factor_ptr);
-    std::cout << "Factor AHP created" << std::endl;
-
-    Eigen::Vector2d point2D_proj = point3D.head<2>()/point3D(2);
-    std::cout << "point2D projected: " << point2D_proj.transpose() << std::endl;
-
-    Eigen::Vector2d point2D_dist;
-    point2D_dist =  pinhole::distortPoint((std::static_pointer_cast<SensorCamera>(image_ptr->getSensor()))->getDistortionVector(),point2D_proj);
-    std::cout << "point2D distorted: " << point2D_dist.transpose() << std::endl;
-
-    Eigen::Vector2d point2D_pix;
-    point2D_pix = pinhole::pixellizePoint(image_ptr->getSensor()->getIntrinsic()->getState(),point2D_dist);
-    std::cout << "point2D pixellized: " << point2D_pix.transpose() << std::endl;
-
-    Eigen::Vector2d expectation;
-    Eigen::Vector3d current_frame_p = last_frame->getP()->getState();
-    Eigen::Vector4d current_frame_o = last_frame->getO()->getState();
-    Eigen::Vector3d anchor_frame_p = landmark->getAnchorFrame()->getP()->getState();
-    Eigen::Vector4d anchor_frame_o = landmark->getAnchorFrame()->getO()->getState();
-    Eigen::Vector4d landmark_ = landmark->getP()->getState();
-
-    factor_ptr ->expectation(current_frame_p.data(), current_frame_o.data(),
-            anchor_frame_p.data(), anchor_frame_o.data(),
-            landmark_.data(), expectation.data());
-//    current_frame p; current_frame o; anchor_frame p; anchor_frame o; homogeneous_vector landmark, residual
-
-    std::cout << "expectation computed" << std::endl;
-    std::cout << "expectation = " << expectation[0] << "   " << expectation[1] << std::endl;
-
-    return 0;
-
-}
diff --git a/demos/demo_gazebo_x-10cm_y-20cm.jpg b/demos/demo_gazebo_x-10cm_y-20cm.jpg
deleted file mode 100644
index 5ad1c83e82ec99b1493914d216bf9fc47c1120ed..0000000000000000000000000000000000000000
Binary files a/demos/demo_gazebo_x-10cm_y-20cm.jpg and /dev/null differ
diff --git a/demos/demo_gazebo_x-10cm_y00cm.jpg b/demos/demo_gazebo_x-10cm_y00cm.jpg
deleted file mode 100644
index 81d004c6643f33130babc65cf439e846b2a112d5..0000000000000000000000000000000000000000
Binary files a/demos/demo_gazebo_x-10cm_y00cm.jpg and /dev/null differ
diff --git a/demos/demo_gazebo_x-20cm_y-10cm.jpg b/demos/demo_gazebo_x-20cm_y-10cm.jpg
deleted file mode 100644
index 9b6f60ad8ffb895a94cc6873a061e0810f2d1dbc..0000000000000000000000000000000000000000
Binary files a/demos/demo_gazebo_x-20cm_y-10cm.jpg and /dev/null differ
diff --git a/demos/demo_gazebo_x-20cm_y-20cm.jpg b/demos/demo_gazebo_x-20cm_y-20cm.jpg
deleted file mode 100644
index 3c15fc66bdd164d15ff55b8647dccd035bcdb3f1..0000000000000000000000000000000000000000
Binary files a/demos/demo_gazebo_x-20cm_y-20cm.jpg and /dev/null differ
diff --git a/demos/demo_gazebo_x-20cm_y00cm.jpg b/demos/demo_gazebo_x-20cm_y00cm.jpg
deleted file mode 100644
index b94fdeef33fba7191105f013ace71c5717a1eefb..0000000000000000000000000000000000000000
Binary files a/demos/demo_gazebo_x-20cm_y00cm.jpg and /dev/null differ
diff --git a/demos/demo_image.cpp b/demos/demo_image.cpp
deleted file mode 100644
index f31e187998f98aba86a5874565199295c1926978..0000000000000000000000000000000000000000
--- a/demos/demo_image.cpp
+++ /dev/null
@@ -1,198 +0,0 @@
-//--------LICENSE_START--------
-//
-// Copyright (C) 2020,2021,2022 Institut de Robòtica i Informàtica Industrial, CSIC-UPC.
-// Authors: Joan Solà Ortega (jsola@iri.upc.edu)
-// All rights reserved.
-//
-// This file is part of WOLF
-// WOLF is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with this program.  If not, see <http://www.gnu.org/licenses/>.
-//
-//--------LICENSE_END--------
-// Testing things for the 3D image odometry
-
-//Wolf includes
-#include "core/sensor/sensor_camera.h"
-#include "core/capture/capture_image.h"
-#include "core/processor/processor_tracker_feature_image.h"
-#include "core/ceres_wrapper/solver_ceres.h"
-
-// Vision utils includes
-#include <vision_utils.h>
-#include <sensors.h>
-#include <common_class/buffer.h>
-#include <common_class/frame.h>
-
-//std includes
-#include <ctime>
-#include <iostream>
-#include <string>
-
-int main(int argc, char** argv)
-{
-    using namespace wolf;
-    using std::shared_ptr;
-    using std::make_shared;
-    using std::static_pointer_cast;
-
-    //ProcessorImageFeature test
-    std::cout << std::endl << " ========= ProcessorImageFeature test ===========" << std::endl << std::endl;
-
-    // Sensor or sensor recording
-    vision_utils::SensorCameraPtr sen_ptr = vision_utils::askUserSource(argc, argv);
-    if (sen_ptr==NULL)
-    	return 0;
-
-    unsigned int buffer_size = 8;
-    vision_utils::Buffer<vision_utils::FramePtr> frame_buff(buffer_size);
-    frame_buff.add( vision_utils::setFrame(sen_ptr->getImage(), 0) );
-
-    unsigned int img_width  = frame_buff.back()->getImage().cols;
-    unsigned int img_height = frame_buff.back()->getImage().rows;
-    std::cout << "Image size: " << img_width << "x" << img_height << std::endl;
-
-    // graphics
-    cv::namedWindow("Feature tracker");    // Creates a window for display.
-    cv::moveWindow("Feature tracker", 0, 0);
-    cv::startWindowThread();
-
-    CaptureImagePtr image_ptr;
-
-    TimeStamp t = 1;
-
-    std::string wolf_root = _WOLF_ROOT_DIR;
-    std::cout << "Wolf root: " << wolf_root << std::endl;
-
-    ProblemPtr wolf_problem_ = Problem::create("PO", 3);
-
-    //=====================================================
-    // Method 1: Use data generated here for sensor and processor
-    //=====================================================
-
-//        // SENSOR
-//        Eigen::Vector4d k = {320,240,320,320};
-//        SensorCameraPtr camera_ptr = std::make_shared<SensorCamera>(std::make_shared<StateBlock>(Eigen::Vector3d::Zero()),
-//                                                  std::make_shared<StateBlock>(Eigen::Vector3d::Zero()),
-//                                                  std::make_shared<StateBlock>(k,false),img_width,img_height);
-//
-//        wolf_problem_->getHardware()->addSensor(camera_ptr);
-//
-//        // PROCESSOR
-//        ProcessorParamsImage tracker_params;
-//        tracker_params.matcher.min_normalized_score = 0.75;
-//        tracker_params.matcher.similarity_norm = cv::NORM_HAMMING;
-//        tracker_params.matcher.roi_width = 30;
-//        tracker_params.matcher.roi_height = 30;
-//        tracker_params.active_search.grid_width = 12;
-//        tracker_params.active_search.grid_height = 8;
-//        tracker_params.active_search.separation = 1;
-//        tracker_params.max_new_features =0;
-//        tracker_params.min_features_for_keyframe = 20;
-//
-//        DetectorDescriptorParamsOrb orb_params;
-//        orb_params.type = DD_ORB;
-//
-//        DetectorDescriptorParamsBrisk brisk_params;
-//        brisk_params.type = DD_BRISK;
-//
-//        // select the kind of detector-descriptor parameters
-//        tracker_params.detector_descriptor_params_ptr = std::make_shared<DetectorDescriptorParamsOrb>(orb_params); // choose ORB
-////        tracker_params.detector_descriptor_params_ptr = std::make_shared<DetectorDescriptorParamsBrisk>(brisk_params); // choose BRISK
-//
-//        std::cout << tracker_params.detector_descriptor_params_ptr->type << std::endl;
-//
-//        ProcessorTrackerTrifocalTensorPtr prc_image = std::make_shared<ProcessorImageFeature>(tracker_params);
-////        camera_ptr->addProcessor(prc_image);
-//        std::cout << "sensor & processor created and added to wolf problem" << std::endl;
-    //=====================================================
-
-    //=====================================================
-    // Method 2: Use factory to create sensor and processor
-    //=====================================================
-
-    /* Do this while there aren't extrinsic parameters on the yaml */
-    Eigen::Vector7d extrinsic_cam;
-    extrinsic_cam[0] = 0; //px
-    extrinsic_cam[1] = 0; //py
-    extrinsic_cam[2] = 0; //pz
-    extrinsic_cam[3] = 0; //qx
-    extrinsic_cam[4] = 0; //qy
-    extrinsic_cam[5] = 0; //qz
-    extrinsic_cam[6] = 1; //qw
-    std::cout << "========extrinsic_cam: " << extrinsic_cam.transpose() << std::endl;
-    const Eigen::VectorXd extr = extrinsic_cam;
-    /* Do this while there aren't extrinsic parameters on the yaml */
-
-    // SENSOR
-    // one-liner API
-    SensorBasePtr sensor_ptr = wolf_problem_->installSensor("CAMERA", "PinHole", Eigen::VectorXd::Zero(7), wolf_root + "/src/examples/camera_params_ueye_sim.yaml");
-    SensorCameraPtr camera_ptr = static_pointer_cast<SensorCamera>(sensor_ptr);
-    camera_ptr->setImgWidth(img_width);
-    camera_ptr->setImgHeight(img_height);
-
-    // PROCESSOR
-    // one-liner API
-    ProcessorTrackerFeatureImagePtr prc_img_ptr = std::static_pointer_cast<ProcessorTrackerFeatureImage>( wolf_problem_->installProcessor("IMAGE FEATURE", "ORB", "PinHole", wolf_root + "/src/examples/processor_image_feature.yaml") );
-    std::cout << "sensor & processor created and added to wolf problem" << std::endl;
-    //=====================================================
-
-//    // Ceres wrapper
-//    ceres::Solver::Options ceres_options;
-//    ceres_options.minimizer_type = ceres::TRUST_REGION; //ceres::TRUST_REGION;LINE_SEARCH
-//    ceres_options.max_line_search_step_contraction = 1e-3;
-//    // ceres_options.minimizer_progress_to_stdout = false;
-//    // ceres_options.line_search_direction_type = ceres::LBFGS;
-//    // ceres_options.max_num_iterations = 100;
-//    google::InitGoogleLogging(argv[0]);
-//    CeresManager ceres_manager(wolf_problem_, ceres_options);
-
-    for(int f = 0; f<10000; ++f)
-    {
-        frame_buff.add( vision_utils::setFrame(sen_ptr->getImage(), f) );
-
-    	std::cout << "\n=============== Frame #: " << f << " ===============" << std::endl;
-
-        t.setToNow();
-        clock_t t1 = clock();
-
-        // Preferred method with factory objects:
-        image_ptr = make_shared<CaptureImage>(t, camera_ptr, frame_buff.back()->getImage());
-
-        camera_ptr->process(image_ptr);
-
-        std::cout << "Time: " << ((double) clock() - t1) / CLOCKS_PER_SEC << "s" << std::endl;
-
-        wolf_problem_->print();
-
-        cv::Mat image = frame_buff.back()->getImage().clone();
-        prc_img_ptr->drawFeatures(image);
-        prc_img_ptr->drawRoi(image,prc_img_ptr->detector_roi_,cv::Scalar(0.0,255.0, 255.0));   //active search roi
-        prc_img_ptr->drawRoi(image,prc_img_ptr->tracker_roi_, cv::Scalar(255.0, 0.0, 255.0));  //tracker roi
-        prc_img_ptr->drawTarget(image,prc_img_ptr->tracker_target_);
-        cv::imshow("Feature tracker", image);
-        cv::waitKey(1);
-
-//        if((f%100) == 0)
-//        {
-//            std::string summary = ceres_manager.solve(2);
-//            std::cout << summary << std::endl;
-//
-//            std::cout << "Last key frame pose: "
-//                      << wolf_problem_->getLastKeyFrame()->getP()->getState().transpose() << std::endl;
-//            std::cout << "Last key frame orientation: "
-//                      << wolf_problem_->getLastKeyFrame()->getO()->getState().transpose() << std::endl;
-//
-//            cv::waitKey(0);
-//        }
-    }
-}
diff --git a/demos/demo_processor_bundle_adjustment.cpp b/demos/demo_processor_bundle_adjustment.cpp
index d7b3affbaabd67b3484ce89b492213916755a964..5a13abcfb91f6bdac694aabdbba3f1f0b8fa71a7 100644
--- a/demos/demo_processor_bundle_adjustment.cpp
+++ b/demos/demo_processor_bundle_adjustment.cpp
@@ -173,9 +173,9 @@ int main(int argc, char** argv)
         camera->process(image);
 
         // solve only when new KFs are added
-        if (problem->getTrajectory()->getFrameMap().size() > number_of_KFs)
+        if (problem->getTrajectory()->size() > number_of_KFs)
         {
-            number_of_KFs = problem->getTrajectory()->getFrameMap().size();
+            number_of_KFs = problem->getTrajectory()->size();
             std::string report = solver->solve(wolf::SolverManager::ReportVerbosity::BRIEF);
             std::cout << report << std::endl;
             if (number_of_KFs > 5)
diff --git a/demos/demo_projection_points.cpp b/demos/demo_projection_points.cpp
deleted file mode 100644
index dbbb0a763eb8214b01ec244d841e74052531a8ed..0000000000000000000000000000000000000000
--- a/demos/demo_projection_points.cpp
+++ /dev/null
@@ -1,489 +0,0 @@
-//--------LICENSE_START--------
-//
-// Copyright (C) 2020,2021,2022 Institut de Robòtica i Informàtica Industrial, CSIC-UPC.
-// Authors: Joan Solà Ortega (jsola@iri.upc.edu)
-// All rights reserved.
-//
-// This file is part of WOLF
-// WOLF is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with this program.  If not, see <http://www.gnu.org/licenses/>.
-//
-//--------LICENSE_END--------
-
-// Vision utils
-#include <vision_utils/vision_utils.h>
-
-//std includes
-#include <iostream>
-
-//wolf includes
-#include "core/math/pinhole_tools.h"
-
-int main(int argc, char** argv)
-{
-    using namespace wolf;
-
-    std::cout << std::endl << " ========= ProjectPoints test ===========" << std::endl << std::endl;
-
-    Eigen::MatrixXd points3D(2,3);
-    Eigen::Vector3d point3D;
-    point3D[0] = 2.0;
-    point3D[1] = 5.0;
-    point3D[2] = 6.0;
-    points3D.row(0)= point3D;
-    point3D[0] = 4.0;
-    point3D[1] = 2.0;
-    point3D[2] = 1.0;
-    points3D.row(1)= point3D;
-
-    Eigen::Vector3d cam_ext_rot_mat = Eigen::Vector3d::Zero();
-    Eigen::Vector3d cam_ext_trans_mat = Eigen::Vector3d::Ones();
-
-    Eigen::Matrix3d cam_intr_mat;
-    cam_intr_mat = Eigen::Matrix3d::Identity();
-    cam_intr_mat(0,2)=2;
-    cam_intr_mat(1,2)=2;
-
-    Eigen::VectorXd dist_coef(5);
-    dist_coef << 0,0,0,0,0;
-
-    Eigen::MatrixXd points2D = vision_utils::projectPoints(points3D, cam_ext_rot_mat, cam_ext_trans_mat, cam_intr_mat, dist_coef);
-
-    for (int ii = 0; ii < points3D.rows(); ++ii)
-        std::cout << "points2D- X: " << points2D(ii,0) << "; Y: " << points2D(ii,1) << std::endl;
-
-    std::cout << std::endl << " ========= PinholeTools DUALITY TEST ===========" << std::endl << std::endl;
-
-    //================================= projectPoint and backprojectPoint to/from NormalizedPlane
-
-    Eigen::Vector3d project_point_normalized_test;
-    project_point_normalized_test[0] = 1.06065;
-    project_point_normalized_test[1] = 1.06065;
-    project_point_normalized_test[2] = 3;
-    Eigen::Vector2d project_point_normalized_output;
-    Eigen::Vector2d project_point_normalized_output2;
-    double project_point_normalized_dist;
-
-    double backproject_point_normalized_depth = 3;
-    Eigen::Vector3d backproject_point_normalized_output;
-
-    project_point_normalized_output = pinhole::projectPointToNormalizedPlane(project_point_normalized_test);
-
-    pinhole::projectPointToNormalizedPlane(project_point_normalized_test,
-                                           project_point_normalized_output2,
-                                           project_point_normalized_dist);
-
-    backproject_point_normalized_output =
-            pinhole::backprojectPointFromNormalizedPlane(project_point_normalized_output,
-                                                         backproject_point_normalized_depth);
-
-    std::cout << "TEST project and backproject PointToNormalizedPlane" << std::endl;
-    std::cout << std:: endl << "Original         " << project_point_normalized_test.transpose() << std::endl;
-    std::cout << std:: endl << "Project          " << project_point_normalized_output.transpose() << std::endl;
-    std::cout << std:: endl << "Alternate project" << project_point_normalized_output.transpose() << std::endl;
-    std::cout << std:: endl << "Backproject      " << backproject_point_normalized_output.transpose() << std::endl;
-
-    //================================= projectPoint and backprojectPoint to/from NormalizedPlane WITH JACOBIANS
-
-    Eigen::Vector3d pp_normalized_test;
-    pp_normalized_test[0] = 3;
-    pp_normalized_test[1] = 3;
-    pp_normalized_test[2] = 3;
-    Eigen::Vector2d pp_normalized_output;
-    Eigen::Vector2d pp_normalized_output2;
-    Eigen::Matrix<double, 2, 3> pp_normalized_jacobian;
-    Eigen::Matrix<double, 2, 3> pp_normalized_jacobian2;
-    double pp_normalized_distance;
-
-    double bpp_normalized_depth = 3;
-    Eigen::Vector3d bpp_normalized_output;
-    Eigen::Matrix<double, 3, 2> bpp_normalized_jacobian;
-    Eigen::Vector3d bpp_normalized_jacobian_depth;
-
-    pinhole::projectPointToNormalizedPlane(pp_normalized_test,
-                                           pp_normalized_output,
-                                           pp_normalized_jacobian);
-    pinhole::projectPointToNormalizedPlane(pp_normalized_test,
-                                           pp_normalized_output2,
-                                           pp_normalized_distance,
-                                           pp_normalized_jacobian2);
-
-    pinhole::backprojectPointFromNormalizedPlane(pp_normalized_output,
-                                                 bpp_normalized_depth,
-                                                 bpp_normalized_output,
-                                                 bpp_normalized_jacobian,
-                                                 bpp_normalized_jacobian_depth);
-
-    std::cout << "\n--------------------------------------------------------" << std::endl;
-    std::cout << "\nTEST project and backproject PointToNormalizedPlane with JACOBIAN" << std::endl;
-
-    std::cout << std:: endl << "Original" << pp_normalized_test.transpose() << std::endl;
-    std::cout << std:: endl << "Project" << pp_normalized_output.transpose() << std::endl;
-    std::cout << "\n--> Jacobian" << std::endl << pp_normalized_jacobian << std::endl;
-
-    std::cout << std:: endl << "Alternate project" << pp_normalized_output2.transpose() << "; distance: "
-              << pp_normalized_distance << std::endl;
-    std::cout << "\n--> Jacobian" << std::endl << pp_normalized_jacobian2 << std::endl;
-
-    std::cout << std:: endl << "Backproject" << bpp_normalized_output.transpose()
-              << "; depth: " << bpp_normalized_depth << std::endl;
-    std::cout << "\n--> Jacobian" << std::endl << bpp_normalized_jacobian << std::endl;
-    std::cout << "\n--> Jacobian - depth" << bpp_normalized_jacobian_depth.transpose() << std::endl;
-
-    Eigen::Matrix2d test_jacobian; // (2x3) * (3x2) = (2x2)
-    test_jacobian =  pp_normalized_jacobian * bpp_normalized_jacobian;
-
-    std::cout << "\n\n\n==> Jacobian Testing" << std::endl << test_jacobian << std::endl;
-
-    //================================= IsInRoi / IsInImage
-
-    Eigen::Vector2d pix;
-    pix[0] = 40; // x
-    pix[1] = 40; // y
-
-    int roi_x = 30;
-    int roi_y = 30;
-    int roi_width = 20;
-    int roi_height = 20;
-
-    int image_width = 640;
-    int image_height = 480;
-
-    bool is_in_roi;
-    bool is_in_image;
-    is_in_roi = pinhole::isInRoi(pix,
-                                 roi_x,
-                                 roi_y,
-                                 roi_width,
-                                 roi_height);
-    is_in_image = pinhole::isInImage(pix,
-                                     image_width,
-                                     image_height);
-
-    std::cout << "\n--------------------------------------------------------" << std::endl;
-    std::cout << "\nTEST isInRoi/isInImage" << std::endl;
-    std::cout << std::endl << "Pixel " << std::endl;
-    std::cout << "x: " << pix[0] << "; y: " << pix[1] << std::endl;
-    std::cout << std::endl << "ROI " << std::endl;
-    std::cout << "x: " << roi_x << "; y: " << roi_y << "; width: " << roi_width << "; height: " << roi_height << std::endl;
-    std::cout << "is_in_roi: " << is_in_roi << std::endl;
-    std::cout << std::endl << "Image " << std::endl;
-    std::cout << "width: " << image_width << "; height: " << image_height << std::endl;
-    std::cout << "is_in_image: " << is_in_image << std::endl;
-
-    //================================= computeCorrectionModel
-
-    Eigen::Vector2d distortion2;
-    distortion2[0] = -0.301701;
-    distortion2[1] = 0.0963189;
-    Eigen::Vector4d k_test2;
-    //k = [u0, v0, au, av]
-    k_test2[0] = 516.686; //u0
-    k_test2[1] = 355.129; //v0
-    k_test2[2] = 991.852; //au
-    k_test2[3] = 995.269; //av
-
-    Eigen::Vector2d correction_test2;
-    pinhole::computeCorrectionModel(k_test2,
-                                    distortion2,
-                                    correction_test2);
-
-    std::cout << "\n--------------------------------------------------------" << std::endl;
-    std::cout << "\nTEST computeCorrectionModel" << std::endl;
-    std::cout << std::endl << "distortion" << std::endl;
-    std::cout << "d1: " << distortion2[0] << "; d2: " << distortion2[1] << std::endl;
-    std::cout << std::endl << "k values" << std::endl;
-    std::cout << "u0: " << k_test2[0] << "; v0: " << k_test2[1] << "; au: " << k_test2[2] << "; av: " << k_test2[3] << std::endl;
-    std::cout << std::endl << "correction" << std::endl;
-    std::cout << "c1: " << correction_test2[0] << "; c2: " << correction_test2[1] << std::endl;
-
-    //================================= distortPoint
-
-    Eigen::Vector2d distorting_point;
-    distorting_point[0] = 0.35355;
-    distorting_point[1] = 0.35355;
-
-    Eigen::Vector2d distored_point3;
-    distored_point3 = pinhole::distortPoint(distortion2,
-                                            distorting_point);
-
-    std::cout << "\n--------------------------------------------------------" << std::endl;
-    std::cout << "\nTEST distortPoint" << std::endl;
-    std::cout << std::endl << "Point to be distorted" << std::endl;
-    std::cout << "x: " << distorting_point[0] << "; y: " << distorting_point[1] << std::endl;
-    std::cout << std::endl << "Distorted point" << std::endl;
-    std::cout << "x: " << distored_point3[0] << "; y: " << distored_point3[1] << std::endl;
-
-    Eigen::Vector2d corrected_point4;
-    corrected_point4 = pinhole::undistortPoint(correction_test2,
-                                               distored_point3);
-    std::cout << std::endl << "Corrected point" << std::endl;
-    std::cout << "x: " << corrected_point4[0] << "; y: " << corrected_point4[1] << std::endl;
-
-    ////
-
-    Eigen::Vector2d distored_point4;
-    Eigen::Matrix2d distortion_jacobian2;
-    pinhole::distortPoint(distortion2,
-                          distorting_point,
-                          distored_point4,
-                          distortion_jacobian2);
-
-    std::cout << "\n\nTEST distortPoint, jacobian" << std::endl;
-    std::cout << std::endl << "Point to be distorted" << std::endl;
-    std::cout << "x: " << distorting_point[0] << "; y: " << distorting_point[1] << std::endl;
-    std::cout << std::endl << "Distorted point" << std::endl;
-    std::cout << "x: " << distored_point4[0] << "; y: " << distored_point4[1] << std::endl;
-    std::cout << "\n--> Jacobian" << std::endl <<
-                 distortion_jacobian2 << std::endl;
-
-    Eigen::Vector2d corrected_point5;
-    Eigen::Matrix2d corrected_jacobian2;
-    pinhole::undistortPoint(correction_test2,
-                            distored_point4,
-                            corrected_point5,
-                            corrected_jacobian2);
-
-    std::cout << std::endl << "Corrected point" << std::endl;
-    std::cout << "x: " << corrected_point5[0] << "; y: " << corrected_point5[1] << std::endl;
-    std::cout << "\n--> Jacobian" << std::endl <<
-                 corrected_jacobian2 << std::endl;
-
-    Eigen::Matrix2d test_jacobian_distortion;
-    test_jacobian_distortion =  distortion_jacobian2 * corrected_jacobian2;
-
-    std::cout << "\n\n\n==> Jacobian Testing" << std::endl <<
-                 test_jacobian_distortion << std::endl;
-
-    //================================= PixelizePoint
-
-    Eigen::Vector2d pixelize_ud;
-    pixelize_ud[0] = 45;
-    pixelize_ud[1] = 28;
-
-    Eigen::Vector2d pixelize_output3;
-    pixelize_output3 = pinhole::pixellizePoint(k_test2,
-                                               pixelize_ud);
-
-    std::cout << "\n--------------------------------------------------------" << std::endl;
-    std::cout << "\nTEST pixelizePoint; Eigen::Vector2d" << std::endl;
-    std::cout << std::endl << "Original" << std::endl;
-    std::cout << "x: " << pixelize_ud[0] << "; y: " << pixelize_ud[1] << std::endl;
-    std::cout << std::endl << "Pixelized" << std::endl;
-    std::cout << "x: " << pixelize_output3[0] << "; y: " << pixelize_output3[1] << std::endl;
-
-    Eigen::Vector2d depixelize_output3;
-    depixelize_output3 = pinhole::depixellizePoint(k_test2,
-                                                   pixelize_output3);
-    std::cout << std::endl << "Depixelized" << std::endl;
-    std::cout << "x: " << depixelize_output3[0] << "; y: " << depixelize_output3[1] << std::endl;
-
-    ////
-
-    Eigen::Vector2d pixelize_output4;
-    Eigen::Matrix2d pixelize_jacobian2;
-    pinhole::pixellizePoint(k_test2,
-                            pixelize_ud,
-                            pixelize_output4,
-                            pixelize_jacobian2);
-
-    std::cout << std::endl << "TEST pixelizePoint; Jacobians" << std::endl;
-    std::cout << std::endl << "Original" << std::endl;
-    std::cout << "x: " << pixelize_ud[0] << "; y: " << pixelize_ud[1] << std::endl;
-    std::cout << std::endl << "Pixelized" << std::endl;
-    std::cout << "x: " << pixelize_output4[0] << "; y: " << pixelize_output4[1] << std::endl;
-    std::cout << "\n--> Jacobian" << std::endl <<
-                 pixelize_jacobian2 << std::endl;
-
-    Eigen::Vector2d depixelize_output4;
-    Eigen::Matrix2d depixelize_jacobian2;
-    pinhole::depixellizePoint(k_test2,
-                              pixelize_output4,
-                              depixelize_output4,
-                              depixelize_jacobian2);
-
-    std::cout << std::endl << "Depixelized" << std::endl;
-    std::cout << "x: " << depixelize_output4[0] << "; y: " << depixelize_output4[1] << std::endl;
-    std::cout << "\n--> Jacobian" << std::endl <<
-                 depixelize_jacobian2 << std::endl;
-
-    Eigen::Matrix2d test_jacobian_pix;
-    test_jacobian_pix =  pixelize_jacobian2 * depixelize_jacobian2;
-
-    std::cout << "\n\n\n==> Jacobian Testing" << std::endl <<
-                 test_jacobian_pix << std::endl;
-
-    //================================= projectPoint Complete
-
-//    //distortion
-//    distortion2;
-
-//    //k
-//    k_test2;
-
-//    //3Dpoint
-//    project_point_normalized_test;
-
-    Eigen::Vector2d point2D_test5;
-    point2D_test5 = pinhole::projectPoint(k_test2,
-                                          distortion2,
-                                          project_point_normalized_test);
-
-    std::cout << "\n--------------------------------------------------------" << std::endl;
-    std::cout << std::endl << "TEST projectPoint Complete" << std::endl;
-    std::cout << "\nPARAMS" << std::endl;
-    std::cout << "\nDistortion:" << std::endl;
-    std::cout << "d1: " << distortion2[0] << "; d2: " << distortion2[1] << std::endl << std::endl;
-    std::cout << "k values:" << std::endl;
-    std::cout << "u0: " << k_test2[0] << "; v0: " << k_test2[1] << "; au: " << k_test2[2] << "; av: " << k_test2[3] << std::endl << std::endl;
-    std::cout << "3D Point" << std::endl;
-    std::cout << "x: " << project_point_normalized_test[0] << "; y: " << project_point_normalized_test[1]
-              << "; z: " << project_point_normalized_test[2] << std::endl;
-    std::cout << "\n\n\nFirst function output" << std::endl;
-    std::cout << "x: " << point2D_test5[0] << "; y: " << point2D_test5[1] << std::endl;
-
-        //distance
-    Eigen::Vector2d point2D_test6;
-    double distance_test4;
-    pinhole::projectPoint(k_test2,
-                          distortion2,
-                          project_point_normalized_test,
-                          point2D_test6,
-                          distance_test4);
-
-    std::cout << std::endl << "Second function output" << std::endl;
-    std::cout << "x: " << point2D_test6[0] << "; y: " << point2D_test6[1] << "; dist: " << distance_test4 << std::endl;
-
-        //jacobian
-    Eigen::Vector2d point2D_test7;
-    Eigen::MatrixXd jacobian_test3(2,3);
-    pinhole::projectPoint(k_test2,
-                          distortion2,
-                          project_point_normalized_test,
-                          point2D_test7,
-                          jacobian_test3);
-
-    std::cout << std::endl << "Third function output" << std::endl;
-    std::cout << "x: " << point2D_test7[0] << "; y: " << point2D_test7[1] << std::endl;
-    std::cout << "\n-->Jacobian" << std::endl <<
-                 jacobian_test3 << std::endl;
-
-        //jacobian and distance
-    Eigen::Vector2d point2D_test8;
-    Eigen::MatrixXd jacobian_test4(2,3);
-    double distance_test3;
-    pinhole::projectPoint(k_test2,
-                          distortion2,
-                          project_point_normalized_test,
-                          point2D_test8,
-                          distance_test3,
-                          jacobian_test4);
-
-    std::cout << std::endl << "Fourth function output" << std::endl;
-    std::cout << "x: " << point2D_test8[0] << "; y: " << point2D_test8[1] << "; dist: " << distance_test3 << std::endl;
-    std::cout << "\n-->Jacobian" << std::endl <<
-                 jacobian_test4 << std::endl;
-
-    /////////////////////////////
-
-//    //correction
-//    correction_test2
-
-//    //2Dpoint
-//    point2D_test5
-
-    double depth3 = project_point_normalized_test[2];
-
-    Eigen::Vector3d point3D_backproj5;
-    point3D_backproj5 = pinhole::backprojectPoint(k_test2,
-                                                  correction_test2,
-                                                  point2D_test5,
-                                                  depth3);
-
-    std::cout << "\n\nTEST backprojectPoint Complete" << std::endl;
-    std::cout << std::endl << "First function output" << std::endl;
-    std::cout << "x: " << point3D_backproj5[0] << "; y: " << point3D_backproj5[1] << "; z: " << point3D_backproj5[2] << std::endl;
-
-        //jacobian
-    Eigen::Vector3d point3D_backproj4;
-    Eigen::MatrixXd jacobian_backproj2(3,2);
-    Eigen::Vector3d depth_jacobian2;
-    pinhole::backprojectPoint(k_test2,
-                              correction_test2,
-                              point2D_test7,
-                              depth3,
-                              point3D_backproj4,
-                              jacobian_backproj2,
-                              depth_jacobian2);
-
-    std::cout << std::endl << "Second function output" << std::endl;
-    std::cout << "x: " << point3D_backproj4[0] << "; y: " << point3D_backproj4[1] << "; z: " << point3D_backproj4[2] << std::endl;
-    std::cout << "\n--> Jacobian" << std::endl <<
-                 jacobian_backproj2 << std::endl;
-    std::cout << "\n--> Jacobian - depth" << std::endl <<
-                 depth_jacobian2[0] << " " << depth_jacobian2[1] << " " << depth_jacobian2[2] << " " << std::endl;
-
-    Eigen::Matrix2d test_jacobian_complete;
-    test_jacobian_complete =  jacobian_test4 * jacobian_backproj2;
-
-    std::cout << "\n\n\n==> Jacobian Testing" << std::endl <<
-                 test_jacobian_complete << std::endl;
-
-    /* Test */
-    std::cout << "\n\n\n\nTEST\n" << std::endl;
-
-    Eigen::Matrix3d K;
-    K(0,0) = 830.748734;    K(0,1) = 0;             K(0,2) = 327.219132;
-    K(1,0) = 0;             K(1,1) = 831.18208;     K(1,2) = 234.720244;
-    K(2,0) = 0;             K(2,1) = 0;             K(2,2) = 1;
-
-    Eigen::Vector4d K_params = {327.219132,234.720244, 830.748734,831.18208};
-
-    std::cout << "K:\n" << K << std::endl;
-
-    Eigen::Vector4d distortion_vector = {0.0006579999999999999, 0.023847, -0.001878, 0.007706999999999999};
-
-    std::cout << "\nDistortion vector:\n" << distortion_vector << std::endl;
-
-    Eigen::Vector4d correction_vector;
-    pinhole::computeCorrectionModel(K_params,
-                                    distortion_vector,
-                                    correction_vector);
-
-    std::cout << "\nCorrection vector:\n" << correction_vector << std::endl;
-
-    Eigen::Vector3d test_point3D;
-    Eigen::Vector2d test_point2D = {123,321};
-    std::cout << "\ntest_point2D ORIGINAL:\n" << test_point2D << std::endl;
-
-    test_point2D = pinhole::depixellizePoint(K_params,
-                                             test_point2D);
-    std::cout << "\ntest_point2D DEPIXELIZED:\n" << test_point2D << std::endl;
-    test_point2D = pinhole::undistortPoint(correction_vector,
-                                           test_point2D);
-    std::cout << "\ntest_point2D UNDISTORTED:\n" << test_point2D << std::endl;
-    test_point3D = pinhole::backprojectPointFromNormalizedPlane(test_point2D,
-                                                                2);
-    std::cout << "\ntest_point3D BACKPROJECTED:\n" << test_point3D << std::endl;
-
-    test_point2D = pinhole::projectPointToNormalizedPlane(test_point3D);
-    std::cout << "\ntest_point2D NORMALIZED:\n" << test_point2D << std::endl;
-    test_point2D = pinhole::distortPoint(distortion_vector,
-                                         test_point2D);
-    std::cout << "\ntest_point2D DISTORTED:\n" << test_point2D << std::endl;
-    test_point2D = pinhole::pixellizePoint(K_params,
-                                           test_point2D);
-    std::cout << "\ntest_point2D PIXELIZED:\n" << test_point2D << std::endl;
-
-}
-
diff --git a/demos/demo_simple_AHP.cpp b/demos/demo_simple_AHP.cpp
deleted file mode 100644
index 60c4fe242946b7f9a60659488ca7a9914cd9ee93..0000000000000000000000000000000000000000
--- a/demos/demo_simple_AHP.cpp
+++ /dev/null
@@ -1,277 +0,0 @@
-//--------LICENSE_START--------
-//
-// Copyright (C) 2020,2021,2022 Institut de Robòtica i Informàtica Industrial, CSIC-UPC.
-// Authors: Joan Solà Ortega (jsola@iri.upc.edu)
-// All rights reserved.
-//
-// This file is part of WOLF
-// WOLF is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with this program.  If not, see <http://www.gnu.org/licenses/>.
-//
-//--------LICENSE_END--------
-/**
- * \file test_simple_AHP.cpp
- *
- *  Created on: 2 nov. 2016
- *      \author: jtarraso
- */
-
-#include "core/common/wolf.h"
-#include "core/frame/frame_base.h"
-#include "core/math/pinhole_tools.h"
-#include "core/sensor/sensor_camera.h"
-#include "core/math/rotations.h"
-#include "core/capture/capture_image.h"
-#include "core/landmark/landmark_AHP.h"
-#include "core/factor/factor_AHP.h"
-#include "core/ceres_wrapper/solver_ceres.h"
-
-// Vision utils
-#include <vision_utils/vision_utils.h>
-
-/**
- * This test simulates the following situation:
- *   - A kf at the origin, we use it as anchor: kf1
- *   - A kf at the origin, we use it to project lmks onto the anchor frame: kf2
- *   - A kf at 1m to the right of the origin, kf3
- *   - A kf at 1m to the left of the origin, kf4
- *   - A lmk at 1m distance in front of the anchor: L1
- *     - we use this lmk to project it onto kf2, kf3 and kf4 and obtain measured pixels p0, p1 and p2
- *   - A lmk initialized at kf1, with measurement p0, at a distance of 2m: L2
- *     - we project the pixels p3 and p4: we observe that they do not match p1 and p2
- *     - we use the measurements p1 and p2 to solve the optimization problem on L2: L2 should converge to L1.
- *   - This is a sketch of the situation:
- *     - X, Y are the axes in world frame
- *     - x, z are the axes in anchor camera frame. We have that X=z, Y=-x.
- *     - Axes Z and y are perpendicular to the drawing; they have no effect.
- *
- *                X,z
- *                 ^
- *                 |
- *              L2 * 2
- *                .|.
- *               . | .
- *              .  |  .
- *             .   |   .
- *            . L1 * 1  .
- *           .   . | .   .
- *          .  .   |   .  .
- *      p4 . .     |     . . p3
- *        .. p2    |    p1 ..
- *  Y <--+---------+---------+
- * -x   +1         0        -1
- *      kf4      kf1        kf3
- *               kf2
- *
- *      camera: (uo,vo) = (320,240)
- *              (au,av) = (320,320)
- *
- *      projection geometry:
- *
- *     1:1  2:1  1:0  2:1  1:1
- *      0   160  320  480  640
- *      +----+----+----+----+
- *                |
- *                | 320
- *                |
- *                *
- *
- *      projected pixels:
- *      p0 = (320,240) // at optical axis or relation 1:0
- *      p1 = ( 0 ,240) // at 45 deg or relation 1:1
- *      p2 = (640,240) // at 45 deg or relation 1:1
- *      p3 = (160,240) // at a relation 2:1
- *      p4 = (480,240) // at a relation 2:1
- *
- */
-int main(int argc, char** argv)
-{
-    using namespace wolf;
-    using namespace Eigen;
-
-    /* 1. crear 2 kf, fixed
-     * 2. crear 1 sensor
-     * 3. crear 1 lmk1
-     * 4. projectar lmk sobre sensor a fk1
-     * 5. projectar lmk sobre sensor a kf4
-     * 6. // esborrar lmk lmk_ptr->remove() no cal
-     * 7. crear nous kf
-     * 8. crear captures
-     * 9. crear features amb les mesures de 4 i 5
-     * 10. crear lmk2 des de kf3
-     * 11. crear factor des del kf4 a lmk2, amb ancora al kf3
-     * 12. solve
-     * 13. lmk1 == lmk2 ?
-     */
-
-    // ============================================================================================================
-    /* 1 */
-    ProblemPtr problem = Problem::create("PO", 3);
-    // One anchor frame to define the lmk, and a copy to make a factor
-    FrameBasePtr kf_1 = problem->emplaceKeyFrame((Vector7d()<<0,0,0,0,0,0,1).finished(), TimeStamp(0));
-    FrameBasePtr kf_2 = problem->emplaceKeyFrame((Vector7d()<<0,0,0,0,0,0,1).finished(), TimeStamp(0));
-    // and two other frames to observe the lmk
-    FrameBasePtr kf_3 = problem->emplaceKeyFrame((Vector7d()<<0,-1,0,0,0,0,1).finished(), TimeStamp(0));
-    FrameBasePtr kf_4 = problem->emplaceKeyFrame((Vector7d()<<0,+1,0,0,0,0,1).finished(), TimeStamp(0));
-
-    kf_1->fix();
-    kf_2->fix();
-    kf_3->fix();
-    kf_4->fix();
-    // ============================================================================================================
-
-    // ============================================================================================================
-    /* 2 */
-    std::string wolf_root = _WOLF_ROOT_DIR;
-    SensorBasePtr sensor_ptr = problem->installSensor("CAMERA", "PinHole", (Vector7d()<<0,0,0,-0.5,0.5,-0.5,0.5).finished(), wolf_root + "/src/examples/camera_params_ueye_sim.yaml");
-    SensorCameraPtr camera = std::static_pointer_cast<SensorCamera>(sensor_ptr);
-    // ============================================================================================================
-
-    // ============================================================================================================
-    /* 3 */
-    Eigen::Vector3d lmk_dir = {0,0,1}; // in the optical axis of the anchor camera at kf1
-    std::cout << std::endl << "lmk: " << lmk_dir.transpose() << std::endl;
-    lmk_dir.normalize();
-    Eigen::Vector4d lmk_hmg_c;
-    double distance = 1.0; // from anchor at kf1
-    lmk_hmg_c = {lmk_dir(0),lmk_dir(1),lmk_dir(2),(1/distance)};
-//    std::cout << "lmk hmg in C frame: " << lmk_hmg_c.transpose() << std::endl;
-    // ============================================================================================================
-
-    // Captures------------------
-    cv::Mat cv_image;
-    cv_image.zeros(2,2,0);
-    CaptureImagePtr image_0 = std::make_shared<CaptureImage>(TimeStamp(0), camera, cv_image);
-    CaptureImagePtr image_1 = std::make_shared<CaptureImage>(TimeStamp(1), camera, cv_image);
-    CaptureImagePtr image_2 = std::make_shared<CaptureImage>(TimeStamp(2), camera, cv_image);
-    kf_2->addCapture(image_0);
-    kf_3->addCapture(image_1);
-    kf_4->addCapture(image_2);
-
-    // Features-----------------
-    cv::Mat desc;
-
-    cv::KeyPoint kp_0;
-    FeaturePointImagePtr feat_0 = std::make_shared<FeaturePointImage>(kp_0, 0, desc, Eigen::Matrix2d::Identity());
-    image_0->addFeature(feat_0);
-
-    cv::KeyPoint kp_1;
-    FeaturePointImagePtr feat_1 = std::make_shared<FeaturePointImage>(kp_1, 0, desc, Eigen::Matrix2d::Identity());
-    image_1->addFeature(feat_1);
-
-    cv::KeyPoint kp_2;
-    FeaturePointImagePtr feat_2 = std::make_shared<FeaturePointImage>(kp_2, 0, desc, Eigen::Matrix2d::Identity());
-    image_2->addFeature(feat_2);
-
-    // Landmark--------------------
-    LandmarkAhpPtr lmk_1 = std::make_shared<LandmarkAhp>(lmk_hmg_c, kf_1, camera, desc);
-    problem->addLandmark(lmk_1);
-    lmk_1->fix();
-    std::cout << "Landmark 1: " << lmk_1->point().transpose() << std::endl;
-
-    // Factors------------------
-    FactorAhpPtr fac_0 = FactorAhp::create(feat_0, lmk_1, nullptr);
-    feat_0->addFactor(fac_0);
-    FactorAhpPtr fac_1 = FactorAhp::create(feat_1, lmk_1, nullptr);
-    feat_1->addFactor(fac_1);
-    FactorAhpPtr fac_2 = FactorAhp::create(feat_2, lmk_1, nullptr);
-    feat_2->addFactor(fac_2);
-
-    // Projections----------------------------
-    Eigen::VectorXd pix_0 = fac_0->expectation();
-    kp_0 = cv::KeyPoint(pix_0(0), pix_0(1), 0);
-    feat_0->setKeypoint(kp_0);
-
-    Eigen::VectorXd pix_1 = fac_1->expectation();
-    kp_1 = cv::KeyPoint(pix_1(0), pix_1(1), 0);
-    feat_1->setKeypoint(kp_1);
-
-    Eigen::VectorXd pix_2 = fac_2->expectation();
-    kp_2 = cv::KeyPoint(pix_2(0), pix_2(1), 0);
-    feat_2->setKeypoint(kp_2);
-
-    std::cout << "pixel 0: " << pix_0.transpose() << std::endl;
-    std::cout << "pixel 1: " << pix_1.transpose() << std::endl;
-    std::cout << "pixel 2: " << pix_2.transpose() << std::endl;
-    //
-    //======== up to here the initial projections ==============
-
-    std::cout << "\n";
-
-    //======== now we want to estimate a new lmk ===============
-    //
-    // Features -----------------
-    FeaturePointImagePtr feat_3 = std::make_shared<FeaturePointImage>(kp_1, 0, desc, Eigen::Matrix2d::Identity());
-    image_1->addFeature(feat_3);
-
-    FeaturePointImagePtr feat_4 = std::make_shared<FeaturePointImage>(kp_2, 0, desc, Eigen::Matrix2d::Identity());
-    image_2->addFeature(feat_4);
-
-    // New landmark with measured pixels from kf2 (anchor) kf3 and kf4 (measurements)
-    double unknown_distance = 2; // the real distance is 1
-    Matrix3d K = camera->getIntrinsicMatrix();
-    Vector3d pix_0_hmg;
-    pix_0_hmg << pix_0, 1;
-    Eigen::Vector3d dir_0 = K.inverse() * pix_0_hmg;
-    Eigen::Vector4d pnt_hmg_0;
-    pnt_hmg_0 << dir_0, 1/unknown_distance;
-    LandmarkAhpPtr lmk_2( std::make_shared<LandmarkAhp>(pnt_hmg_0, kf_2, camera, desc) );
-    problem->addLandmark(lmk_2);
-    std::cout << "Landmark 2: " << lmk_2->point().transpose() << std::endl;
-
-    // New factors from kf3 and kf4
-    FactorAhpPtr fac_3 = FactorAhp::create(feat_3, lmk_2, nullptr);
-    feat_3->addFactor(fac_3);
-    FactorAhpPtr fac_4 = FactorAhp::create(feat_4, lmk_2, nullptr);
-    feat_4->addFactor(fac_4);
-
-    Eigen::Vector2d pix_3 = fac_3->expectation();
-    Eigen::Vector2d pix_4 = fac_4->expectation();
-
-    std::cout << "pix 3: " << pix_3.transpose() << std::endl;
-    std::cout << "pix 4: " << pix_4.transpose() << std::endl;
-
-    // Wolf tree status ----------------------
-    problem->print(3);
-//    problem->check();
-
-    // ========== solve ==================================================================================================
-    /* 12 */
-    // Ceres wrapper
-    SolverCeres solver(problem);
-    solver.getSolverOptions().minimizer_type = ceres::TRUST_REGION; //ceres::TRUST_REGION;LINE_SEARCH
-    solver.getSolverOptions().max_line_search_step_contraction = 1e-3;
-    //    solver.getSolverOptions().minimizer_progress_to_stdout = false;
-    //    solver.getSolverOptions().line_search_direction_type = ceres::LBFGS;
-    //    solver.getSolverOptions().max_num_iterations = 100;
-    google::InitGoogleLogging(argv[0]);
-
-
-    std::string summary = solver.solve(SolverManager::ReportVerbosity::FULL);// 0: nothing, 1: BriefReport, 2: FullReport
-    std::cout << summary << std::endl;
-
-    // Test of convergence over the lmk params
-    bool pass = (lmk_2->point() - lmk_1->point()).isMuchSmallerThan(1,Constants::EPS);
-
-    std::cout << "Landmark 2 below should have converged to Landmark 1:" << std::endl;
-    std::cout << "Landmark 1: " << lmk_1->point().transpose() << std::endl;
-    std::cout << "Landmark 2: " << lmk_2->point().transpose() << std::endl;
-    std::cout << "Landmark convergence test " << (pass ? "PASSED" : "FAILED") << std::endl;
-    std::cout << std::endl;
-
-    if (!pass)
-        return -1;
-    return 0;
-
-}
-
diff --git a/demos/demo_tracker_ORB.cpp b/demos/demo_tracker_ORB.cpp
deleted file mode 100644
index f127990da8126ac2677fc81314147fca1c017be6..0000000000000000000000000000000000000000
--- a/demos/demo_tracker_ORB.cpp
+++ /dev/null
@@ -1,288 +0,0 @@
-//--------LICENSE_START--------
-//
-// Copyright (C) 2020,2021,2022 Institut de Robòtica i Informàtica Industrial, CSIC-UPC.
-// Authors: Joan Solà Ortega (jsola@iri.upc.edu)
-// All rights reserved.
-//
-// This file is part of WOLF
-// WOLF is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with this program.  If not, see <http://www.gnu.org/licenses/>.
-//
-//--------LICENSE_END--------
-//std includes
-#include <iostream>
-
-// Vision utils
-#include <vision_utils.h>
-#include <sensors.h>
-#include <common_class/buffer.h>
-#include <common_class/frame.h>
-#include <detectors/orb/detector_orb.h>
-#include <descriptors/orb/descriptor_orb.h>
-#include <matchers/bruteforce_hamming_2/matcher_bruteforce_hamming_2.h>
-
-//Wolf
-#include "core/processor/processor_tracker_landmark_image.h"
-
-int main(int argc, char** argv)
-{
-    using namespace wolf;
-
-    std::cout << std::endl << "==================== tracker ORB test ======================" << std::endl;
-
-    //=====================================================
-    // Environment variable for configuration files
-    std::string wolf_root = _WOLF_ROOT_DIR;
-    //=====================================================
-
-    //=====================================================
-
-    // Sensor or sensor recording
-    vision_utils::SensorCameraPtr sen_ptr = vision_utils::askUserSource(argc, argv);
-    if (sen_ptr==NULL)
-        return 0;
-
-    // Detector
-    vision_utils::DetectorParamsORBPtr params_det = std::make_shared<vision_utils::DetectorParamsORB>();
-
-    params_det->nfeatures = 500;        // The maximum number of features to retain.
-    params_det->scaleFactor = 2;        // Pyramid decimation ratio, greater than 1. scaleFactor==2 means the classical pyramid, where each next level has 4x less pixels than the previous, but such a big scale factor will degrade feature matching scores dramatically. On the other hand, too close to 1 scale factor will mean that to cover certain scale range you will need more pyramid levels and so the speed will suffer.
-    params_det->nlevels = 8;            // The number of pyramid levels. The smallest level will have linear size equal to input_image_linear_size/pow(scaleFactor, nlevels).
-    params_det->edgeThreshold = 16;     // This is size of the border where the features are not detected. It should roughly match the patchSize parameter.
-    params_det->firstLevel = 0;         // It should be 0 in the current implementation.
-    params_det->WTA_K = 2;              // The number of points that produce each element of the oriented BRIEF descriptor. The default value 2 means the BRIEF where we take a random point pair and compare their brightnesses, so we get 0/1 response. Other possible values are 3 and 4. For example, 3 means that we take 3 random points (of course, those point coordinates are random, but they are generated from the pre-defined seed, so each element of BRIEF descriptor is computed deterministically from the pixel rectangle), find point of maximum brightness and output index of the winner (0, 1 or 2). Such output will occupy 2 bits, and therefore it will need a special variant of Hamming distance, denoted as NORM_HAMMING2 (2 bits per bin). When WTA_K=4, we take 4 random points to compute each bin (that will also occupy 2 bits with possible values 0, 1, 2 or 3).
-    params_det->scoreType = cv::ORB::HARRIS_SCORE; //#enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 };
-    params_det->patchSize = 31;
-
-    vision_utils::DetectorBasePtr det_b_ptr = vision_utils::setupDetector("ORB", "ORB detector", params_det);
-    vision_utils::DetectorORBPtr det_ptr = std::static_pointer_cast<vision_utils::DetectorORB>(det_b_ptr);
-
-    // Descriptor
-    vision_utils::DescriptorParamsORBPtr params_des = std::make_shared<vision_utils::DescriptorParamsORB>();
-
-    params_des->nfeatures = 500;        // The maximum number of features to retain.
-    params_des->scaleFactor = 2;        // Pyramid decimation ratio, greater than 1. scaleFactor==2 means the classical pyramid, where each next level has 4x less pixels than the previous, but such a big scale factor will degrade feature matching scores dramatically. On the other hand, too close to 1 scale factor will mean that to cover certain scale range you will need more pyramid levels and so the speed will suffer.
-    params_des->nlevels = 8;            // The number of pyramid levels. The smallest level will have linear size equal to input_image_linear_size/pow(scaleFactor, nlevels).
-    params_des->edgeThreshold = 16;     // This is size of the border where the features are not detected. It should roughly match the patchSize parameter.
-    params_des->firstLevel = 0;         // It should be 0 in the current implementation.
-    params_des->WTA_K = 2;              // The number of points that produce each element of the oriented BRIEF descriptor. The default value 2 means the BRIEF where we take a random point pair and compare their brightnesses, so we get 0/1 response. Other possible values are 3 and 4. For example, 3 means that we take 3 random points (of course, those point coordinates are random, but they are generated from the pre-defined seed, so each element of BRIEF descriptor is computed deterministically from the pixel rectangle), find point of maximum brightness and output index of the winner (0, 1 or 2). Such output will occupy 2 bits, and therefore it will need a special variant of Hamming distance, denoted as NORM_HAMMING2 (2 bits per bin). When WTA_K=4, we take 4 random points to compute each bin (that will also occupy 2 bits with possible values 0, 1, 2 or 3).
-    params_des->scoreType = cv::ORB::HARRIS_SCORE; //#enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 };
-    params_des->patchSize = 31;
-
-    vision_utils::DescriptorBasePtr des_b_ptr = vision_utils::setupDescriptor("ORB", "ORB descriptor", params_des);
-    vision_utils::DescriptorORBPtr des_ptr = std::static_pointer_cast<vision_utils::DescriptorORB>(des_b_ptr);
-
-    // Matcher
-    vision_utils::MatcherParamsBRUTEFORCE_HAMMING_2Ptr params_mat = std::make_shared<vision_utils::MatcherParamsBRUTEFORCE_HAMMING_2>();
-    vision_utils::MatcherBasePtr mat_b_ptr = vision_utils::setupMatcher("BRUTEFORCE_HAMMING_2", "BRUTEFORCE_HAMMING_2 matcher", params_mat);
-    vision_utils::MatcherBRUTEFORCE_HAMMING_2Ptr mat_ptr = std::static_pointer_cast<vision_utils::MatcherBRUTEFORCE_HAMMING_2>(mat_b_ptr);
-
-    //=====================================================
-
-    unsigned int buffer_size = 8;
-    vision_utils::Buffer<vision_utils::FramePtr> frame_buff(buffer_size);
-    frame_buff.add( vision_utils::setFrame(sen_ptr->getImage(), 0) );
-
-    unsigned int img_width  = frame_buff.back()->getImage().cols;
-    unsigned int img_height = frame_buff.back()->getImage().rows;
-    std::cout << "Image size: " << img_width << "x" << img_height << std::endl;
-
-    cv::namedWindow("Feature tracker");    // Creates a window for display.
-    cv::moveWindow("Feature tracker", 0, 0);
-    cv::startWindowThread();
-    cv::imshow("Feature tracker", frame_buff.back()->getImage());
-    cv::waitKey(1);
-
-    KeyPointVector target_keypoints;
-    KeyPointVector tracked_keypoints_;
-    KeyPointVector tracked_keypoints_2;
-    KeyPointVector current_keypoints;
-    cv::Mat target_descriptors;
-    cv::Mat tracked_descriptors;
-    cv::Mat tracked_descriptors2;
-    cv::Mat current_descriptors;
-    cv::Mat image_original = frame_buff.back()->getImage();
-    cv::Mat image_graphics;
-
-    unsigned int roi_width = 200;
-    unsigned int roi_heigth = 200;
-
-    int n_first_1 = 0;
-    int n_second_1 = 0;
-
-    // Initial detection
-    target_keypoints = det_ptr->detect(image_original);
-    target_descriptors = des_ptr->getDescriptor(image_original, target_keypoints);
-
-    for (unsigned int f_num=0; f_num < 1000; ++f_num)
-    {
-        frame_buff.add( vision_utils::setFrame(sen_ptr->getImage(), f_num) );
-
-        KeyPointVector keypoints;
-        cv::Mat descriptors;
-        DMatchVector cv_matches;
-        cv::Mat image = frame_buff.back()->getImage();
-        image_graphics = image.clone();
-        bool matched = false;
-        n_first_1 = n_second_1 = 0;
-
-        unsigned int tracked_keypoints = 0;
-
-        for(unsigned int target_idx = 0; target_idx < target_keypoints.size(); target_idx++)
-        {
-            std::cout << "\npixel: " << target_keypoints[target_idx].pt << std::endl;
-            std::cout << "target_descriptor[" << target_idx << "]:\n" << target_descriptors.row(target_idx) << std::endl;
-
-            matched = false;
-
-            cv::Rect roi = vision_utils::setRoi(target_keypoints[target_idx].pt.x, target_keypoints[target_idx].pt.y, roi_width, roi_heigth);
-
-            cv::Point2f roi_up_left_corner;
-            roi_up_left_corner.x = roi.x;
-            roi_up_left_corner.y = roi.y;
-
-            for(unsigned int fr = 0; fr < 2; fr++)
-            {
-                keypoints = det_ptr->detect(image, roi);
-                descriptors = des_ptr->getDescriptor(image, keypoints);
-
-                cv::Mat target_descriptor; //B(cv::Rect(0,0,vec_length,1));
-                target_descriptor = target_descriptors(cv::Rect(0,target_idx,target_descriptors.cols,1));
-
-                if(keypoints.size() != 0)
-                {
-                    mat_ptr->match(target_descriptor, descriptors, des_ptr->getSize(), cv_matches);
-                    double normalized_score = 1 - (double)(cv_matches[0].distance)/(des_ptr->getSize()*8);
-                    std::cout << "pixel: " << keypoints[cv_matches[0].trainIdx].pt + roi_up_left_corner << std::endl;
-                    std::cout << "normalized score: " << normalized_score << std::endl;
-                    if(normalized_score < 0.8)
-                    {
-                        std::cout << "not tracked" << std::endl;
-                    }
-                    else
-                    {
-                        std::cout << "tracked" << std::endl;
-
-                        matched = true;
-
-                        cv::Point2f point,t_point;
-                        point.x = keypoints[cv_matches[0].trainIdx].pt.x;
-                        point.y = keypoints[cv_matches[0].trainIdx].pt.y;
-                        t_point.x = target_keypoints[target_idx].pt.x;
-                        t_point.y = target_keypoints[target_idx].pt.y;
-
-                        cv::circle(image_graphics, t_point, 4, cv::Scalar(51.0, 51.0, 255.0), -1, 3, 0);
-                        cv::circle(image_graphics, point, 2, cv::Scalar(255.0, 255.0, 0.0), -1, 8, 0);
-                        cv::putText(image_graphics, std::to_string(target_idx), point, cv:: FONT_HERSHEY_SIMPLEX, 0.4, cv::Scalar(255.0, 255.0, 0.0));
-
-                        //introduce in list - tracked point
-                        cv::KeyPoint tracked_kp = keypoints[cv_matches[0].trainIdx];
-                        tracked_kp.pt.x = tracked_kp.pt.x + roi.x;
-                        tracked_kp.pt.y = tracked_kp.pt.y + roi.y;
-                        if(fr==0)
-                            tracked_keypoints_.push_back(tracked_kp);
-                        else
-                            tracked_keypoints_2.push_back(tracked_kp);
-
-                        cv::Mat tracked_desc;
-                        tracked_desc = descriptors(cv::Rect(0,cv_matches[0].trainIdx,target_descriptors.cols,1));
-                        if(fr==0)
-                            tracked_descriptors.push_back(tracked_desc);
-                        else
-                            tracked_descriptors2.push_back(tracked_desc);
-
-                        //introduce in list - target point
-                        if(fr==0)
-                        {
-                            current_keypoints.push_back(target_keypoints[target_idx]);
-                            current_descriptors.push_back(target_descriptor);
-                        }
-
-                        if (fr == 0 && normalized_score == 1)n_first_1++;
-                        if (fr == 1 && normalized_score == 1)n_second_1++;
-                    }
-                }
-                else
-                    std::cout << "not tracked" << std::endl;
-
-            }
-            if (matched) tracked_keypoints++;
-        }
-
-        std::cout << "\ntracked keypoints: " << tracked_keypoints << "/" << target_keypoints.size() << std::endl;
-        std::cout << "percentage first: " << ((float)((float)tracked_keypoints/(float)target_keypoints.size()))*100 << "%" << std::endl;
-        std::cout << "Number of perfect first matches: " << n_first_1 << std::endl;
-        std::cout << "Number of perfect second matches: " << n_second_1 << std::endl;
-
-        if(tracked_keypoints == 0)
-        {
-            target_keypoints = det_ptr->detect(image);
-            target_descriptors = des_ptr->getDescriptor(image, target_keypoints);
-            std::cout << "number of new keypoints to be tracked: " << target_keypoints.size() << std::endl;
-        }
-        else
-        {
-            std::cout << "\n\nADVANCE" << std::endl;
-//            for(unsigned int i = 0; i < target_keypoints.size(); i++)
-//            {
-//                std::cout << "\ntarget keypoints";
-//                std::cout << target_keypoints[i].pt;
-//            }
-//            for(unsigned int i = 0; i < current_keypoints.size(); i++)
-//            {
-//                std::cout << "\ncurrent keypoints";
-//                std::cout << current_keypoints[i].pt;
-//            }
-//            for( int i = 0; i < target_descriptors.rows; i++)
-//            {
-//                std::cout << "\ntarget descriptors";
-//                std::cout << target_descriptors.row(i);
-//            }
-//            for( int i = 0; i < current_descriptors.rows; i++)
-//            {
-//                std::cout << "\ncurrent descriptors";
-//                std::cout << current_descriptors.row(i);
-//            }
-//            for( int i = 0; i < current_descriptors2.rows; i++)
-//            {
-//                std::cout << "\ncurrent descriptors2";
-//                std::cout << current_descriptors2.row(i);
-//            }
-
-            //target_keypoints.clear();
-            target_keypoints = current_keypoints;
-            current_descriptors.copyTo(target_descriptors);
-            current_keypoints.clear();
-            current_descriptors.release();
-
-            std::cout << "\nAFTER THE ADVANCE" << std::endl;
-//            for(unsigned int i = 0; i < target_keypoints.size(); i++)
-//            {
-//                std::cout << "target keypoints";
-//                std::cout << target_keypoints[i].pt << "\t" ;
-//            }
-//            for( int i = 0; i < target_descriptors.rows; i++)
-//            {
-//                std::cout << "\ntarget descriptors";
-//                std::cout << target_descriptors.row(i);
-//            }
-
-            std::cout << "\nEND OF ADVANCE\n";
-
-        }
-
-        tracked_keypoints = 0;
-        cv::imshow("Feature tracker", image_graphics);
-        cv::waitKey(1);
-    }
-}
diff --git a/demos/demo_trifocal_optimization.cpp b/demos/demo_trifocal_optimization.cpp
deleted file mode 100644
index 8e4a9e642cd85e43c361518d77a453cd82f95c66..0000000000000000000000000000000000000000
--- a/demos/demo_trifocal_optimization.cpp
+++ /dev/null
@@ -1,274 +0,0 @@
-//--------LICENSE_START--------
-//
-// Copyright (C) 2020,2021,2022 Institut de Robòtica i Informàtica Industrial, CSIC-UPC.
-// Authors: Joan Solà Ortega (jsola@iri.upc.edu)
-// All rights reserved.
-//
-// This file is part of WOLF
-// WOLF is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with this program.  If not, see <http://www.gnu.org/licenses/>.
-//
-//--------LICENSE_END--------
-// Testing creating wolf tree from imported .graph file
-
-//C includes for sleep, time and main args
-#include "unistd.h"
-
-//std includes
-#include <iostream>
-
-// Vision utils
-#include <vision_utils.h>
-
-//Wolf includes
-#include "../processors/processor_tracker_feature_trifocal.h"
-#include "../capture_image.h"
-#include "../sensor_camera.h"
-#include "../ceres_wrapper/solver_ceres.h"
-#include "../rotations.h"
-#include "../capture_pose.h"
-#include "../capture_void.h"
-#include "../constraints/constraint_autodiff_distance_3D.h"
-
-Eigen::VectorXd get_random_state(const double& _LO, const double& _HI)
-{
-    double range= _HI-_LO;
-    Eigen::VectorXd x = Eigen::VectorXd::Random(7); // Vector filled with random numbers between (-1,1)
-    x = (x + Eigen::VectorXd::Constant(7,1.0))*range/2.; // add 1 to the vector to have values between 0 and 2; multiply with range/2
-    x = (x + Eigen::VectorXd::Constant(7,_LO)); //set LO as the lower bound (offset)
-    x.segment(3,4).normalize(); // Normalize quaternion part
-    return x;
-}
-
-int main(int argc, char** argv)
-{
-    using namespace wolf;
-    using std::shared_ptr;
-    using std::make_shared;
-    using std::static_pointer_cast;
-    using Eigen::Vector2d;
-
-    std::string wolf_root = _WOLF_ROOT_DIR;
-
-    // ===============================================
-    // TEST IMAGES ===================================
-    // ===============================================
-
-    // x,y displacement, negatives values are directly added in the path string (row-wise).
-    Eigen::MatrixXd img_pos = Eigen::MatrixXd::Zero(10,2);
-    img_pos.row(0) <<  0, 0;
-    img_pos.row(1) << -1, 0;
-    img_pos.row(2) << -2, 0;
-    img_pos.row(3) << -2,-1;
-    img_pos.row(4) << -2,-2;
-    img_pos.row(5) << -1,-2;
-    img_pos.row(6) <<  0,-2;
-    img_pos.row(7) <<  0,-1;
-    img_pos.row(8) <<  0, 0;
-    img_pos.row(9) << -1, 0;
-
-    // read image
-    std::cout << std::endl << "-> Reading images from ground-truth movements..." << std::endl;
-    std::vector<cv::Mat> images;
-    for (unsigned int ii = 0; ii < img_pos.rows(); ++ii)
-    {
-        std::string img_path = wolf_root + "/src/examples/Test_gazebo_x" + std::to_string((int)img_pos(ii,0)) + "0cm_y" + std::to_string((int)img_pos(ii,1)) + "0cm.jpg";
-        std::cout << " |->" << img_path << std::endl;
-        images.push_back(cv::imread(img_path, CV_LOAD_IMAGE_GRAYSCALE));   // Read the file
-        if(! images.at(ii).data )                              // Check for invalid input
-        {
-            std::cout <<  " X--Could not open or find the image: " << img_path << std::endl ;
-            return -1;
-        }
-    }
-    std::cout << std::endl;
-
-
-    // Scale ground truth priors
-    img_pos *= 0.10;
-
-    cv::imshow( "DEBUG VIEW", images.at(0) );  // Show our image inside it.
-    cv::waitKey(1);                            // Wait for a keystroke in the window
-
-    // ===============================================
-    // CONFIG WOLF ===================================
-    // ===============================================
-
-    // Wolf problem
-    ProblemPtr problem = Problem::create("PO", 3);
-
-    // CERES WRAPPER
-    SolverCeresPtr solver = make_shared<SolverCeres>(problem, ceres_options);
-    solver->getSolverOptions().max_num_iterations = 50;
-    solver->getSolverOptions().function_tolerance = 1e-6;
-
-    // Install tracker (sensor and processor)
-    Eigen::Vector7d cam_ext; cam_ext << 0.0,0.0,0.0, 0.0,0.0,0.0,1.0;
-    std::string cam_intr_yaml = wolf_root + "/src/examples/camera_params_1280x960_ideal.yaml";
-    SensorBasePtr sensor = problem->installSensor("CAMERA","camera",cam_ext,cam_intr_yaml);
-    SensorCameraPtr camera = std::static_pointer_cast<SensorCamera>(sensor);
-
-    std::string proc_trifocal_params_yaml = wolf_root + "/src/examples/processor_tracker_feature_trifocal.yaml";
-    ProcessorBasePtr processor = problem->installProcessor("TRACKER FEATURE TRIFOCAL","trifocal","camera", proc_trifocal_params_yaml);
-    ProcessorTrackerFeatureTrifocalPtr tracker = std::static_pointer_cast<ProcessorTrackerFeatureTrifocal>(processor);
-
-    // ===============================================
-    // KF1 (PRIOR) ===================================
-    // ===============================================
-
-    // Set problem PRIOR
-    double dt = 0.01;
-    TimeStamp   t(0.0);
-    Vector7d    x; x <<  img_pos.row(0).transpose(), 0.0,   0.0, 0.0, 0.0, 1.0;
-    x.segment(3,4).normalize();
-    Matrix6d    P = Matrix6d::Identity() * 0.000001; // 1mm
-
-    // ====== KF1 ======
-    FrameBasePtr kf1 = problem->setPrior(x, P, t);
-
-    // Process capture
-    CaptureImagePtr capture_1 = make_shared<CaptureImage>(t, camera, images.at(0));
-    camera->process(capture_1);
-
-    // Verify KFs
-    FrameBasePtr kf1_check = capture_1->getFramePtr();
-    assert(kf1->id()==kf1_check->id() && "Prior and KF1 are not the same!");
-
-//    problem->print(2,0,1,0);
-
-
-    // ===============================================
-    // Other KFs =====================================
-    // ===============================================
-    int kf_total_num = img_pos.rows();
-    std::vector<FrameBasePtr> kfs;
-    kfs.push_back(kf1);
-    std::vector<CaptureImagePtr> captures;
-    captures.push_back(capture_1);
-
-    for (int kf_id = 2; kf_id <= kf_total_num; ++kf_id)
-    {
-        t += dt; // increment t
-
-        if ( (kf_id % 2 == 1) )
-        {
-            x << img_pos.row(kf_id-1).transpose(),0.0,    0.0,0.0,0.0,1.0; // ground truth position
-            FrameBasePtr kf = problem->emplaceFrame(KEY_FRAME,x,t);
-            std::cout << "KeyFrm " << kf->id() << " TS: " << kf->getTimeStamp() << std::endl;
-            kfs.push_back(kf);
-            problem->keyFrameCallback(kf,nullptr,dt/2.0); // Ack KF creation
-        }
-
-        CaptureImagePtr capture = make_shared<CaptureImage>(t, camera, images.at(kf_id-1));
-        captures.push_back(capture);
-        std::cout << "Capture " << kf_id << " TS: " << capture->getTimeStamp() << std::endl;
-        camera->process(capture);
-
-        cv::waitKey(1); // Wait for a keystroke in the window
-    }
-
-    // ==================================================
-    // Establish Scale Factor (to see results scaled)
-    // ==================================================
-
-    std::cout << "================== ADD Scale constraint ========================" << std::endl;
-
-    // Distance constraint
-    Vector1d distance(0.2);      // 2x10cm distance -- this fixes the scale
-    Matrix1d dist_cov(0.000001); // 1mm error
-
-    CaptureBasePtr
-    cap_dist  = problem->closestKeyFrameToTimeStamp(TimeStamp(2*dt))->addCapture(make_shared<CaptureVoid>(t,
-                                                                                                          sensor));
-    FeatureBasePtr
-    ftr_dist = cap_dist->addFeature(make_shared<FeatureBase>("DISTANCE",
-                                                              distance,
-                                                              dist_cov));
-    FactorBasePtr
-    ctr_dist  = ftr_dist->addFactor(make_shared<FactorAutodiffDistance3D>(ftr_dist,
-                                                                                  kfs.at(0),
-                                                                                  nullptr));
-    kfs.at(0)->addConstrainedBy(ctr_dist);
-
-    problem->print(1,1,1,0);
-
-    // ===============================================
-    // SOLVE PROBLEM (1) =============================
-    // ===============================================
-
-    std::cout << "================== SOLVE 1rst TIME ========================" << std::endl;
-
-    std::string report = solver->solve(SolverManager::ReportVerbosity::FULL);
-    std::cout << report << std::endl;
-
-    problem->print(1,1,1,0);
-
-    // Print orientation states for all KFs
-    for (auto kf : problem->getTrajectoryPtr()->getFrameList())
-        std::cout << "KF" << kf->id() << " Euler deg " << wolf::q2e(kf->getOPtr()->getState()).transpose()*180.0/3.14159 << std::endl;
-
-
-    // ===============================================
-    // COVARIANCES ===================================
-    // ===============================================
-    // GET COVARIANCES of all states
-    WOLF_TRACE("======== COVARIANCES OF SOLVED PROBLEM =======")
-    solver->computeCovariances(SolverManager::CovarianceBlocksToBeComputed::ALL_MARGINALS);
-    for (auto kf : problem->getTrajectoryPtr()->getFrameList())
-        if (kf->isKey())
-        {
-            Eigen::MatrixXd cov = kf->getCovariance();
-            WOLF_TRACE("KF", kf->id(), "_std (sigmas) = ", cov.diagonal().transpose().array().sqrt());
-        }
-    std::cout << std::endl;
-
-    // ===============================================
-    // PERTURBATE STATES =============================
-    // ===============================================
-
-    // ADD PERTURBATION
-    std::cout << "================== ADD PERTURBATION ========================" << std::endl;
-
-    for (auto kf : problem->getTrajectoryPtr()->getFrameList())
-    {
-        if (kf != kf1)
-        {
-            Eigen::Vector7d perturbation; perturbation << Vector7d::Random() * 0.05;
-            Eigen::Vector7d state_perturbed = kf->getState() + perturbation;
-            state_perturbed.segment(3,4).normalize();
-            kf->setState(state_perturbed);
-            std::cout << "KF" << kf->id() << " Euler deg " << wolf::q2e(kf->getOPtr()->getState()).transpose()*180.0/3.14159 << std::endl;
-        }
-    }
-
-    problem->print(1,1,1,0);
-
-    // ===============================================
-    // SOLVE PROBLEM (2) =============================
-    // ===============================================
-
-    // ===== SOLVE SECOND TIME =====
-    report = solver->solve(SolverManager::ReportVerbosity::FULL);
-
-    std::cout << report << std::endl;
-
-    std::cout << "================== AFTER SOLVE 2nd TIME ========================" << std::endl;
-    problem->print(1,1,1,0);
-
-    for (auto kf : problem->getTrajectoryPtr()->getFrameList())
-        std::cout << "KF" << kf->id() << " Euler deg " << wolf::q2e(kf->getOPtr()->getState()).transpose()*180.0/3.14159 << std::endl;
-
-    cv::waitKey(0); // Wait for a keystroke in the window
-
-    return 0;
-}
diff --git a/demos/demo_visual_odometry.cpp b/demos/demo_visual_odometry.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..f3b0c88f3c43cc9cad49809ced1194befdfc89f1
--- /dev/null
+++ b/demos/demo_visual_odometry.cpp
@@ -0,0 +1,116 @@
+//--------LICENSE_START--------
+//
+// Copyright (C) 2020,2021,2022 Institut de Robòtica i Informàtica Industrial, CSIC-UPC.
+// Authors: Joan Solà Ortega (jsola@iri.upc.edu)
+// All rights reserved.
+//
+// This file is part of WOLF
+// WOLF is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with this program.  If not, see <http://www.gnu.org/licenses/>.
+//
+//--------LICENSE_END--------
+//std
+#include <iostream>
+
+//Wolf
+#include <core/common/wolf.h>
+#include "core/yaml/parser_yaml.h"
+#include <core/problem/problem.h>
+#include "vision/sensor/sensor_camera.h"
+#include "vision/processor/processor_visual_odometry.h"
+#include "vision/capture/capture_image.h"
+#include "vision/internal/config.h"
+
+
+using namespace wolf;
+
+std::string wolf_vision_root = _WOLF_VISION_ROOT_DIR;
+
+int main(int argc, char** argv)
+{
+
+
+    WOLF_INFO("======== CONFIGURE PROBLEM =======");
+
+    // Config file to parse. Here is where all the problem is defined:
+    std::string wolf_vision_root = _WOLF_VISION_ROOT_DIR;
+    std::string config_file = "demos/demo_visual_odometry.yaml";
+
+    // parse file into params server: each param will be retrievable from this params server:
+    ParserYaml parser       = ParserYaml(config_file, wolf_vision_root);
+    ParamsServer server     = ParamsServer(parser.getParams());
+    // Wolf problem: automatically build the left branch of the wolf tree from the contents of the params server:
+    ProblemPtr problem      = Problem::autoSetup(server);
+
+    // Print problem to see its status before processing any sensor data
+    problem->print(4,0,1,0);
+
+    // recover sensor pointers and other stuff for later use (access by sensor name)
+    SensorCameraPtr sensor_cam = std::dynamic_pointer_cast<SensorCamera>(problem->findSensor("sen cam"));
+    // ProcessorVisualOdometryPtr proc_vo = std::dynamic_pointer_cast<ProcessorVisualOdometry>(problem->getProcessor("proc vo"));
+
+    //    ==============================================================================
+
+    // std::string euroc_data_folder = "/home/mfourmy/Documents/Phd_LAAS/data/Euroc/MH_01_easy/mav0/cam0/data/";
+    cv::String euroc_data_path("/home/mfourmy/Documents/Phd_LAAS/data/Euroc/MH_01_easy/mav0/cam0/data/*.png");
+    std::vector<cv::String> fn;
+    cv::glob(euroc_data_path, fn, true); // recurse
+
+    TimeStamp t = 0;
+    // unsigned int number_of_KFs = 0;
+    // main loop
+    double dt = 0.05;
+
+    std::cout << sensor_cam->getCategory() << std::endl;
+
+
+    // for (size_t k=0; k < fn.size(); ++k)
+    for (size_t k=0; k < 20; ++k)
+    {
+        cv::Mat img = cv::imread(fn[k], cv::IMREAD_GRAYSCALE);
+
+        //////////////////////////
+        // Correct img
+        // 
+        // ....... 
+        //
+        //////////////////////////
+
+        CaptureImagePtr image = std::make_shared<CaptureImage>(t, sensor_cam, img);
+        sensor_cam->process(image);
+
+        // problem->print(3,0,1,1);
+
+
+        // // solve only when new KFs are added
+        // if (problem->getTrajectory()->getFrameMap().size() > number_of_KFs)
+        // {
+        //     number_of_KFs = problem->getTrajectory()->getFrameMap().size();
+        //     std::string report = solver->solve(wolf::SolverManager::ReportVerbosity::BRIEF);
+        //     std::cout << report << std::endl;
+        //     if (number_of_KFs > 5)
+        //     	break;
+        // }
+
+        // cv::waitKey();//1e7);
+
+        t += dt;
+
+
+    }
+    problem->print(3,0,1,1);
+
+
+    return 0;
+}
+
diff --git a/demos/demo_visual_odometry.yaml b/demos/demo_visual_odometry.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..bfdc5d18b3e4ea847084783a7c3a821fd541b7df
--- /dev/null
+++ b/demos/demo_visual_odometry.yaml
@@ -0,0 +1,51 @@
+config:
+
+  problem:
+  
+    dimension:            3               # space is 3d
+    frame_structure:      "PO"            # keyframes have position and orientation
+  
+    prior:
+      mode:               "factor"
+      $state:
+        P: [0,0,0]
+        O: [0,0,0,1]
+      $sigma:
+        P: [0.001, 0.001, 0.001]
+        O: [0.31, 0.31, 0.31]
+      time_tolerance:     0.005
+
+    tree_manager:
+      type: "none"
+      plugin: "core"
+  
+  solver:
+    max_num_iterations: 100
+    verbose: 0
+    period: 0.2
+    interrupt_on_problem_change: false
+    n_threads: 2
+    compute_cov: false
+    minimizer: LEVENBERG_MARQUARDT
+    use_nonmonotonic_steps: false         # only for LEVENBERG_MARQUARDT and DOGLEG
+    function_tolerance: 0.000001
+    gradient_tolerance: 0.0000000001
+    
+  sensors:
+    
+    - type:               "SensorCamera"
+      plugin:             "vision"
+      name:               "sen cam"
+      extrinsic: 
+        pose:             [0,0,0, 0,0,0,1]
+      using_raw:          true
+      follow:             "demos/camera_euroc_mav0_notang.yaml"         # config parameters in this file
+
+  processors:
+    
+    - type:               "ProcessorVisualOdometry"
+      plugin:             "vision"
+      name:               "prc vo"
+      sensor_name:        "sen cam"                                    # attach processor to this sensor
+      follow:             "demos/processor_visual_odometry.yaml"        # config parameters in this file
+    
\ No newline at end of file
diff --git a/demos/processor_bundle_adjustment_vision_utils.yaml b/demos/processor_bundle_adjustment_vision_utils.yaml
deleted file mode 100644
index 305f609b222c50c27f94f0f22ab44906fc378bfd..0000000000000000000000000000000000000000
--- a/demos/processor_bundle_adjustment_vision_utils.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
-sensor:
-  type: "USB_CAM"
-
-detector:
-  type: "ORB"         
-  nfeatures: 400
-  scale factor: 1.0
-  nlevels: 1
-  edge threshold: 31   # 16
-  first level: 0 
-  WTA_K: 2            # See: http://docs.opencv.org/trunk/db/d95/classcv_1_1ORB.html#a180ae17d3300cf2c619aa240d9b607e5
-  score type: 1      #enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 };
-  patch size: 31      # 31
-
-descriptor:
-  type: "ORB"         
-  nfeatures: 400
-  scale factor: 1.0
-  nlevels: 1
-  edge threshold: 31   # 16
-  first level: 0 
-  WTA_K: 2            # See: http://docs.opencv.org/trunk/db/d95/classcv_1_1ORB.html#a180ae17d3300cf2c619aa240d9b607e5
-  score type: 1       #enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 };
-  patch size: 31      # 31
-
-matcher:
-  type: "FLANNBASED"  # BRUTEFORCE, BRUTEFORCE_HAMMING, BRUTEFORCE_HAMMING_2, BRUTEFORCE_L1, FLANNBASED
-  match type: 1     #  Match type. MATCH = 1, KNNMATCH = 2, RADIUSMATCH = 3
-  filtering:
-    ransac epipolar distance: 3  # Distance to epipolar [pixels]
-    ransac confidence prob: 0.97 # Confidence probability
-    max match euclidean dist: 20 # Max euclidean distance to consider a match as inlier       
-    
\ No newline at end of file
diff --git a/demos/processor_visual_odometry.yaml b/demos/processor_visual_odometry.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4f19f6af07466e337b79a0bb27f77058ee139244
--- /dev/null
+++ b/demos/processor_visual_odometry.yaml
@@ -0,0 +1,71 @@
+time_tolerance: 0.005
+
+keyframe_vote:
+    voting_active: true
+    # Trigger a new keyframe creation as well as detection of new keypoints in last frame
+    # when the track number goes below min_features_for_keyframe in incoming
+    min_features_for_keyframe: 20
+
+# Use a robust cost function
+apply_loss_function: true
+
+# Select the best new Keypoints when performing detection
+max_new_features: 100
+
+####################################
+# ProcessorVisualOdometry parameters
+
+# Image Equalization methods
+#   0: none
+#   1: average
+#   2: histogram_equalization
+#   3: CLAHE = Contrast Limited Adaptive Histogram Equalization, 1-1.5 ms overhead
+equalization:
+    method: 1
+    average:
+        median: 127 # half of 8 bits = 255/2 = 127.5 --> to integer <-- TODO we could maybe automate this
+    histogram:
+    clahe:
+        clip_limit: 2
+        tile_grid_size: [8,8]
+
+# FAST KeyPoint detection 
+fast:
+    # Threshold on the keypoint pixel intensity (in uchar [0-255]) 
+    # the higher, the more selective the detector is
+    threshold: 30
+    # Avoids getting multiple keypoints at the same place
+    non_max_suppresion: true
+    
+# Lucas Kanade tracking parameters
+klt:
+    patch_width: 15
+    patch_height: 15
+    nlevels_pyramids: 3
+    max_err: 0.3
+
+# tesselation grid    
+grid:
+    # number of cells used by the active search grid data structure
+    nbr_cells_h: 6  # horizontal
+    nbr_cells_v: 5  # vertical
+    # minimum margin of the region of interest from the edges of the image 
+    margin: 10
+    # reduce the size of each region of interest by n pixels to prevent keypoints from being too close
+    separation: 10
+
+ransac:
+    # specifies a desirable level of confidence (probability) that the estimated matrix is correct
+    prob: 0.999
+    # maximum distance from a point to an epipolar line in pixels, beyond which the point 
+    # is considered an outlier and is not used for computing the final fundamental matrix
+    thresh: 1
+
+# Keep the number of tracks below 
+max_nb_tracks: 100
+
+# standard deviation of the pixel reprojection factor 
+std_pix: 1
+
+# before creating a landmark, wait until the track is old enough
+min_track_length_for_landmark: 3
diff --git a/doc/.gitignore b/doc/.gitignore
deleted file mode 100644
index ac7af2e80e3a7920c6989974744a27c21db8cd33..0000000000000000000000000000000000000000
--- a/doc/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-/html/
diff --git a/doc/Processors_timeline.txt b/doc/Processors_timeline.txt
deleted file mode 100644
index 97719008201f139ce3efe4e22c263f4908e162e1..0000000000000000000000000000000000000000
--- a/doc/Processors_timeline.txt
+++ /dev/null
@@ -1,78 +0,0 @@
-ProcessorBase		ProcessorTracker	PrcTrkLandmark		ProcTrkLmkDerived
-======================================================================================
-process(inc) >		process(inc)		
-					  preProcess()
-
-					  processNew() >	processNew()
-										  detectNewFeat() >	detectNewFeat()
-										    createNewLmks()
-										    createLmk() >	createLmk()
-										  findLmks() >		findLmks()
-
-					  estabConstr() >	estabConstr()
-										  createConstr() >	createConstr()
-
-					  processKnown() >	processKnown()
-										  findLmks() >		findLmks()
-
-					< makeFrame()
-
-makeFrame()
-P.createFr()
-P.kfCallback()
-kfCallback() >		  kfCallback()
-=======================================================================================
-
-
-
-ProcessorBase		ProcessorTracker	PrcTrkFeature		ProcTrkFeatDerived
-=======================================================================================
-process(inc) >		process(inc)		
-					  preProcess() >	preProcess() > 		preProcess()
-
-					  processNew() >	processNew()
-										  detectNewFeat() >	detectNewFeat()
-										  createNewLmks()
-										    createLmk() >	createLmk()
-										  trackFeatures() >	trackFeatures()
-
-			 		    estabConstr() >	estabConstr()
-										  createConstr() >	createConstr()
-
-					  processKnown() >	processKnown()
-										  trackFeatures() >	trackFeatures()
-										  correctFtDrift() >	correctFtDrift()
-
-					< makeFrame()
-
-makeFrame()
-  P.createFr()
-  P.kfCallback()
-  kfCallback() >	kfCallback()
-=======================================================================================
-
-
-
-ProcessorBase		ProcessorMotion		PrcMotionDerived
-==============================================================
-process(inc) >		process(inc)
-					  preProcess() >	preProcess()
-
-					  integrate()
-					    updateDt()
-					    data2delta() >	data2delta()
-					    deltaPlusDelta() >	deltaPlusDelta()
-
-					  voteForKF() >		voteForKF()
-
-					  makeFrame()
-
-					makeFrame()
-					  P.createFrame()
-					  P.kfCallback()
-					  kfCallback()
-
-					kfCallback()
-					  createConstr() >	createConstr()
-					  reintegrate()
-==============================================================
diff --git a/doc/doxygen.conf b/doc/doxygen.conf
index dc3608694f177138711f769ceadb7ef004665ad4..09076a4594e94eb6191319b6dc15a47e5183e844 100644
--- a/doc/doxygen.conf
+++ b/doc/doxygen.conf
@@ -1,4 +1,4 @@
-# Doxyfile 1.8.8
+# Doxyfile 1.9.3
 
 # This file describes the settings to be used by the documentation system
 # doxygen (www.doxygen.org) for a project.
@@ -17,11 +17,11 @@
 # Project related configuration options
 #---------------------------------------------------------------------------
 
-# This tag specifies the encoding used for all characters in the config file
-# that follow. The default is UTF-8 which is also the encoding used for all text
-# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv
-# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv
-# for the list of possible encodings.
+# This tag specifies the encoding used for all characters in the configuration
+# file that follow. The default is UTF-8 which is also the encoding used for all
+# text before the first occurrence of this tag. Doxygen uses libiconv (or the
+# iconv built into libc) for the transcoding. See
+# https://www.gnu.org/software/libiconv/ for the list of possible encodings.
 # The default value is: UTF-8.
 
 DOXYFILE_ENCODING      = UTF-8
@@ -32,7 +32,7 @@ DOXYFILE_ENCODING      = UTF-8
 # title of most generated pages and in a few other places.
 # The default value is: My Project.
 
-PROJECT_NAME           = "WOLF"
+PROJECT_NAME           = WOLF
 
 # The PROJECT_NUMBER tag can be used to enter a project or revision number. This
 # could be handy for archiving the generated documentation or if some version
@@ -44,14 +44,12 @@ PROJECT_NUMBER         =
 # for a project that appears at the top of each page and should give viewer a
 # quick idea about the purpose of the project. Keep the description short.
 
-PROJECT_BRIEF          = 
-# "A lion and a tiger might be stronger, but a wolf does not perform in circus"
-# Windowed Localization Frames
+PROJECT_BRIEF          =
 
-# With the PROJECT_LOGO tag one can specify an logo or icon that is included in
-# the documentation. The maximum height of the logo should not exceed 55 pixels
-# and the maximum width should not exceed 200 pixels. Doxygen will copy the logo
-# to the output directory.
+# With the PROJECT_LOGO tag one can specify a logo or an icon that is included
+# in the documentation. The maximum height of the logo should not exceed 55
+# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy
+# the logo to the output directory.
 
 PROJECT_LOGO           = ../doc/images/wolf-120.png
 
@@ -62,7 +60,7 @@ PROJECT_LOGO           = ../doc/images/wolf-120.png
 
 OUTPUT_DIRECTORY       = ../doc
 
-# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 4096 sub-
+# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub-
 # directories (in 2 levels) under the output directory of each output format and
 # will distribute the generated files over these directories. Enabling this
 # option can be useful when feeding doxygen a huge amount of source files, where
@@ -95,14 +93,14 @@ ALLOW_UNICODE_NAMES    = NO
 
 OUTPUT_LANGUAGE        = English
 
-# If the BRIEF_MEMBER_DESC tag is set to YES doxygen will include brief member
+# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member
 # descriptions after the members that are listed in the file and class
 # documentation (similar to Javadoc). Set to NO to disable this.
 # The default value is: YES.
 
 BRIEF_MEMBER_DESC      = YES
 
-# If the REPEAT_BRIEF tag is set to YES doxygen will prepend the brief
+# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief
 # description of a member or function before the detailed description
 #
 # Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
@@ -137,7 +135,7 @@ ALWAYS_DETAILED_SEC    = NO
 
 INLINE_INHERITED_MEMB  = NO
 
-# If the FULL_PATH_NAMES tag is set to YES doxygen will prepend the full path
+# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path
 # before files name in the file list and in the header files. If set to NO the
 # shortest path that makes the file name unique will be used
 # The default value is: YES.
@@ -181,6 +179,16 @@ SHORT_NAMES            = NO
 
 JAVADOC_AUTOBRIEF      = NO
 
+# If the JAVADOC_BANNER tag is set to YES then doxygen will interpret a line
+# such as
+# /***************
+# as being the beginning of a Javadoc-style comment "banner". If set to NO, the
+# Javadoc-style will behave just like regular comments and it will not be
+# interpreted by doxygen.
+# The default value is: NO.
+
+JAVADOC_BANNER         = NO
+
 # If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
 # line (until the first dot) of a Qt-style comment as the brief description. If
 # set to NO, the Qt-style will behave just like regular Qt-style comments (thus
@@ -201,15 +209,23 @@ QT_AUTOBRIEF           = NO
 
 MULTILINE_CPP_IS_BRIEF = NO
 
+# By default Python docstrings are displayed as preformatted text and doxygen's
+# special commands cannot be used. By setting PYTHON_DOCSTRING to NO the
+# doxygen's special commands can be used and the contents of the docstring
+# documentation blocks is shown as doxygen documentation.
+# The default value is: YES.
+
+PYTHON_DOCSTRING       = YES
+
 # If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
 # documentation from any documented member that it re-implements.
 # The default value is: YES.
 
 INHERIT_DOCS           = YES
 
-# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce a
-# new page for each member. If set to NO, the documentation of a member will be
-# part of the file/class/namespace that contains it.
+# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new
+# page for each member. If set to NO, the documentation of a member will be part
+# of the file/class/namespace that contains it.
 # The default value is: NO.
 
 SEPARATE_MEMBER_PAGES  = NO
@@ -224,20 +240,19 @@ TAB_SIZE               = 4
 # the documentation. An alias has the form:
 # name=value
 # For example adding
-# "sideeffect=@par Side Effects:\n"
+# "sideeffect=@par Side Effects:^^"
 # will allow you to put the command \sideeffect (or @sideeffect) in the
 # documentation, which will result in a user-defined paragraph with heading
-# "Side Effects:". You can put \n's in the value part of an alias to insert
-# newlines.
+# "Side Effects:". Note that you cannot put \n's in the value part of an alias
+# to insert newlines (in the resulting output). You can put ^^ in the value part
+# of an alias to insert a newline as if a physical newline was in the original
+# file. When you need a literal { or } or , in the value part of an alias you
+# have to escape them by means of a backslash (\), this can lead to conflicts
+# with the commands \{ and \} for these it is advised to use the version @{ and
+# @} or use a double escape (\\{ and \\})
 
 ALIASES                =
 
-# This tag can be used to specify a number of word-keyword mappings (TCL only).
-# A mapping has the form "name=value". For example adding "class=itcl::class"
-# will allow you to use the command class in the itcl::class meaning.
-
-TCL_SUBST              =
-
 # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
 # only. Doxygen will then generate output that is more tailored for C. For
 # instance, some of the names that are used will be different. The list of all
@@ -266,28 +281,40 @@ OPTIMIZE_FOR_FORTRAN   = NO
 
 OPTIMIZE_OUTPUT_VHDL   = NO
 
+# Set the OPTIMIZE_OUTPUT_SLICE tag to YES if your project consists of Slice
+# sources only. Doxygen will then generate output that is more tailored for that
+# language. For instance, namespaces will be presented as modules, types will be
+# separated into more groups, etc.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_SLICE  = NO
+
 # Doxygen selects the parser to use depending on the extension of the files it
 # parses. With this tag you can assign which parser to use for a given
 # extension. Doxygen has a built-in mapping, but you can override or extend it
 # using this tag. The format is ext=language, where ext is a file extension, and
-# language is one of the parsers supported by doxygen: IDL, Java, Javascript,
-# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran:
-# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran:
-# Fortran. In the later case the parser tries to guess whether the code is fixed
-# or free formatted code, this is the default for Fortran type files), VHDL. For
-# instance to make doxygen treat .inc files as Fortran files (default is PHP),
-# and .f files as C (default is Fortran), use: inc=Fortran f=C.
+# language is one of the parsers supported by doxygen: IDL, Java, JavaScript,
+# Csharp (C#), C, C++, Lex, D, PHP, md (Markdown), Objective-C, Python, Slice,
+# VHDL, Fortran (fixed format Fortran: FortranFixed, free formatted Fortran:
+# FortranFree, unknown formatted Fortran: Fortran. In the latter case the parser
+# tries to guess whether the code is fixed or free formatted code, this is the
+# default for Fortran type files). For instance to make doxygen treat .inc files
+# as Fortran files (default is PHP), and .f files as C (default is Fortran),
+# use: inc=Fortran f=C.
 #
-# Note For files without extension you can use no_extension as a placeholder.
+# Note: For files without extension you can use no_extension as a placeholder.
 #
 # Note that for custom extensions you also need to set FILE_PATTERNS otherwise
-# the files are not read by doxygen.
+# the files are not read by doxygen. When specifying no_extension you should add
+# * to the FILE_PATTERNS.
+#
+# Note: see also the list of default file extension mappings.
 
 EXTENSION_MAPPING      =
 
 # If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
 # according to the Markdown format, which allows for more readable
-# documentation. See http://daringfireball.net/projects/markdown/ for details.
+# documentation. See https://daringfireball.net/projects/markdown/ for details.
 # The output of markdown processing is further processed by doxygen, so you can
 # mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
 # case of backward compatibilities issues.
@@ -295,10 +322,19 @@ EXTENSION_MAPPING      =
 
 MARKDOWN_SUPPORT       = YES
 
+# When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up
+# to that level are automatically included in the table of contents, even if
+# they do not have an id attribute.
+# Note: This feature currently applies only to Markdown headings.
+# Minimum value: 0, maximum value: 99, default value: 5.
+# This tag requires that the tag MARKDOWN_SUPPORT is set to YES.
+
+TOC_INCLUDE_HEADINGS   = 5
+
 # When enabled doxygen tries to link words that correspond to documented
 # classes, or namespaces to their corresponding documentation. Such a link can
-# be prevented in individual cases by by putting a % sign in front of the word
-# or globally by setting AUTOLINK_SUPPORT to NO.
+# be prevented in individual cases by putting a % sign in front of the word or
+# globally by setting AUTOLINK_SUPPORT to NO.
 # The default value is: YES.
 
 AUTOLINK_SUPPORT       = YES
@@ -320,7 +356,7 @@ BUILTIN_STL_SUPPORT    = NO
 CPP_CLI_SUPPORT        = NO
 
 # Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
-# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen
+# https://www.riverbankcomputing.com/software/sip/intro) sources only. Doxygen
 # will parse them like normal C++ but will assume all classes use public instead
 # of private inheritance when no explicit protection keyword is present.
 # The default value is: NO.
@@ -338,13 +374,20 @@ SIP_SUPPORT            = NO
 IDL_PROPERTY_SUPPORT   = YES
 
 # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
-# tag is set to YES, then doxygen will reuse the documentation of the first
+# tag is set to YES then doxygen will reuse the documentation of the first
 # member in the group (if any) for the other members of the group. By default
 # all members of a group must be documented explicitly.
 # The default value is: NO.
 
 DISTRIBUTE_GROUP_DOC   = NO
 
+# If one adds a struct or class to a group and this option is enabled, then also
+# any nested class or struct is added to the same group. By default this option
+# is disabled and one has to add nested compounds explicitly via \ingroup.
+# The default value is: NO.
+
+GROUP_NESTED_COMPOUNDS = NO
+
 # Set the SUBGROUPING tag to YES to allow class member groups of the same type
 # (for instance a group of public functions) to be put as a subgroup of that
 # type (e.g. under the Public Functions section). Set it to NO to prevent
@@ -399,11 +442,24 @@ TYPEDEF_HIDES_STRUCT   = NO
 
 LOOKUP_CACHE_SIZE      = 0
 
+# The NUM_PROC_THREADS specifies the number threads doxygen is allowed to use
+# during processing. When set to 0 doxygen will base this on the number of
+# cores available in the system. You can set it explicitly to a value larger
+# than 0 to get more control over the balance between CPU load and processing
+# speed. At this moment only the input processing can be done using multiple
+# threads. Since this is still an experimental feature the default is set to 1,
+# which effectively disables parallel processing. Please report any issues you
+# encounter. Generating dot graphs in parallel is controlled by the
+# DOT_NUM_THREADS setting.
+# Minimum value: 0, maximum value: 32, default value: 1.
+
+NUM_PROC_THREADS       = 1
+
 #---------------------------------------------------------------------------
 # Build related configuration options
 #---------------------------------------------------------------------------
 
-# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
+# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in
 # documentation are documented, even if no documentation was available. Private
 # class members and static file members will be hidden unless the
 # EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
@@ -413,35 +469,41 @@ LOOKUP_CACHE_SIZE      = 0
 
 EXTRACT_ALL            = YES
 
-# If the EXTRACT_PRIVATE tag is set to YES all private members of a class will
+# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will
 # be included in the documentation.
 # The default value is: NO.
 
 EXTRACT_PRIVATE        = NO
 
-# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal
+# If the EXTRACT_PRIV_VIRTUAL tag is set to YES, documented private virtual
+# methods of a class will be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PRIV_VIRTUAL   = NO
+
+# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal
 # scope will be included in the documentation.
 # The default value is: NO.
 
 EXTRACT_PACKAGE        = NO
 
-# If the EXTRACT_STATIC tag is set to YES all static members of a file will be
+# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be
 # included in the documentation.
 # The default value is: NO.
 
 EXTRACT_STATIC         = NO
 
-# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) defined
-# locally in source files will be included in the documentation. If set to NO
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined
+# locally in source files will be included in the documentation. If set to NO,
 # only classes defined in header files are included. Does not have any effect
 # for Java sources.
 # The default value is: YES.
 
 EXTRACT_LOCAL_CLASSES  = YES
 
-# This flag is only useful for Objective-C code. When set to YES local methods,
+# This flag is only useful for Objective-C code. If set to YES, local methods,
 # which are defined in the implementation section but not in the interface are
-# included in the documentation. If set to NO only methods in the interface are
+# included in the documentation. If set to NO, only methods in the interface are
 # included.
 # The default value is: NO.
 
@@ -456,6 +518,13 @@ EXTRACT_LOCAL_METHODS  = NO
 
 EXTRACT_ANON_NSPACES   = NO
 
+# If this flag is set to YES, the name of an unnamed parameter in a declaration
+# will be determined by the corresponding definition. By default unnamed
+# parameters remain unnamed in the output.
+# The default value is: YES.
+
+RESOLVE_UNNAMED_PARAMS = YES
+
 # If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
 # undocumented members inside documented classes or files. If set to NO these
 # members will be included in the various overviews, but no documentation
@@ -466,21 +535,21 @@ HIDE_UNDOC_MEMBERS     = NO
 
 # If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
 # undocumented classes that are normally visible in the class hierarchy. If set
-# to NO these classes will be included in the various overviews. This option has
-# no effect if EXTRACT_ALL is enabled.
+# to NO, these classes will be included in the various overviews. This option
+# has no effect if EXTRACT_ALL is enabled.
 # The default value is: NO.
 
 HIDE_UNDOC_CLASSES     = NO
 
 # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
-# (class|struct|union) declarations. If set to NO these declarations will be
-# included in the documentation.
+# declarations. If set to NO, these declarations will be included in the
+# documentation.
 # The default value is: NO.
 
 HIDE_FRIEND_COMPOUNDS  = NO
 
 # If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
-# documentation blocks found inside the body of a function. If set to NO these
+# documentation blocks found inside the body of a function. If set to NO, these
 # blocks will be appended to the function's detailed documentation block.
 # The default value is: NO.
 
@@ -493,22 +562,42 @@ HIDE_IN_BODY_DOCS      = NO
 
 INTERNAL_DOCS          = NO
 
-# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file
-# names in lower-case letters. If set to YES upper-case letters are also
-# allowed. This is useful if you have classes or files whose names only differ
-# in case and if your file system supports case sensitive file names. Windows
-# and Mac users are advised to set this option to NO.
+# With the correct setting of option CASE_SENSE_NAMES doxygen will better be
+# able to match the capabilities of the underlying filesystem. In case the
+# filesystem is case sensitive (i.e. it supports files in the same directory
+# whose names only differ in casing), the option must be set to YES to properly
+# deal with such files in case they appear in the input. For filesystems that
+# are not case sensitive the option should be set to NO to properly deal with
+# output files written for symbols that only differ in casing, such as for two
+# classes, one named CLASS and the other named Class, and to also support
+# references to files without having to specify the exact matching casing. On
+# Windows (including Cygwin) and MacOS, users should typically set this option
+# to NO, whereas on Linux or other Unix flavors it should typically be set to
+# YES.
 # The default value is: system dependent.
 
 CASE_SENSE_NAMES       = YES
 
 # If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
-# their full class and namespace scopes in the documentation. If set to YES the
+# their full class and namespace scopes in the documentation. If set to YES, the
 # scope will be hidden.
 # The default value is: NO.
 
 HIDE_SCOPE_NAMES       = NO
 
+# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will
+# append additional text to a page's title, such as Class Reference. If set to
+# YES the compound reference will be hidden.
+# The default value is: NO.
+
+HIDE_COMPOUND_REFERENCE= NO
+
+# If the SHOW_HEADERFILE tag is set to YES then the documentation for a class
+# will show which file needs to be included to use the class.
+# The default value is: YES.
+
+SHOW_HEADERFILE        = YES
+
 # If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
 # the files that are included by a file in the documentation of that file.
 # The default value is: YES.
@@ -536,14 +625,14 @@ INLINE_INFO            = YES
 
 # If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the
 # (detailed) documentation of file and class members alphabetically by member
-# name. If set to NO the members will appear in declaration order.
+# name. If set to NO, the members will appear in declaration order.
 # The default value is: YES.
 
 SORT_MEMBER_DOCS       = NO
 
 # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
 # descriptions of file, namespace and class members alphabetically by member
-# name. If set to NO the members will appear in declaration order. Note that
+# name. If set to NO, the members will appear in declaration order. Note that
 # this will also influence the order of the classes in the class list.
 # The default value is: NO.
 
@@ -588,27 +677,25 @@ SORT_BY_SCOPE_NAME     = NO
 
 STRICT_PROTO_MATCHING  = NO
 
-# The GENERATE_TODOLIST tag can be used to enable ( YES) or disable ( NO) the
-# todo list. This list is created by putting \todo commands in the
-# documentation.
+# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo
+# list. This list is created by putting \todo commands in the documentation.
 # The default value is: YES.
 
 GENERATE_TODOLIST      = YES
 
-# The GENERATE_TESTLIST tag can be used to enable ( YES) or disable ( NO) the
-# test list. This list is created by putting \test commands in the
-# documentation.
+# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test
+# list. This list is created by putting \test commands in the documentation.
 # The default value is: YES.
 
 GENERATE_TESTLIST      = YES
 
-# The GENERATE_BUGLIST tag can be used to enable ( YES) or disable ( NO) the bug
+# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug
 # list. This list is created by putting \bug commands in the documentation.
 # The default value is: YES.
 
 GENERATE_BUGLIST       = YES
 
-# The GENERATE_DEPRECATEDLIST tag can be used to enable ( YES) or disable ( NO)
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO)
 # the deprecated list. This list is created by putting \deprecated commands in
 # the documentation.
 # The default value is: YES.
@@ -633,8 +720,8 @@ ENABLED_SECTIONS       =
 MAX_INITIALIZER_LINES  = 30
 
 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
-# the bottom of the documentation of classes and structs. If set to YES the list
-# will mention the files that were used to generate the documentation.
+# the bottom of the documentation of classes and structs. If set to YES, the
+# list will mention the files that were used to generate the documentation.
 # The default value is: YES.
 
 SHOW_USED_FILES        = YES
@@ -668,7 +755,8 @@ FILE_VERSION_FILTER    =
 # output files in an output format independent way. To create the layout file
 # that represents doxygen's defaults, run doxygen with the -l option. You can
 # optionally specify a file name after the option, if omitted DoxygenLayout.xml
-# will be used as the name of the layout file.
+# will be used as the name of the layout file. See also section "Changing the
+# layout of pages" for information.
 #
 # Note that if you run doxygen from a directory containing a file called
 # DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
@@ -679,7 +767,7 @@ LAYOUT_FILE            =
 # The CITE_BIB_FILES tag can be used to specify one or more bib files containing
 # the reference definitions. This must be a list of .bib files. The .bib
 # extension is automatically appended if omitted. This requires the bibtex tool
-# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info.
+# to be installed. See also https://en.wikipedia.org/wiki/BibTeX for more info.
 # For LaTeX the style of the bibliography can be controlled using
 # LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
 # search path. See also \cite for info how to create references.
@@ -698,7 +786,7 @@ CITE_BIB_FILES         =
 QUIET                  = NO
 
 # The WARNINGS tag can be used to turn on/off the warning messages that are
-# generated to standard error ( stderr) by doxygen. If WARNINGS is set to YES
+# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES
 # this implies that the warnings are on.
 #
 # Tip: Turn warnings on while writing the documentation.
@@ -706,7 +794,7 @@ QUIET                  = NO
 
 WARNINGS               = YES
 
-# If the WARN_IF_UNDOCUMENTED tag is set to YES, then doxygen will generate
+# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate
 # warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
 # will automatically be disabled.
 # The default value is: YES.
@@ -714,21 +802,39 @@ WARNINGS               = YES
 WARN_IF_UNDOCUMENTED   = YES
 
 # If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
-# potential errors in the documentation, such as not documenting some parameters
-# in a documented function, or documenting parameters that don't exist or using
-# markup commands wrongly.
+# potential errors in the documentation, such as documenting some parameters in
+# a documented function twice, or documenting parameters that don't exist or
+# using markup commands wrongly.
 # The default value is: YES.
 
 WARN_IF_DOC_ERROR      = YES
 
+# If WARN_IF_INCOMPLETE_DOC is set to YES, doxygen will warn about incomplete
+# function parameter documentation. If set to NO, doxygen will accept that some
+# parameters have no documentation without warning.
+# The default value is: YES.
+
+WARN_IF_INCOMPLETE_DOC = YES
+
 # This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
 # are documented, but have no documentation for their parameters or return
-# value. If set to NO doxygen will only warn about wrong or incomplete parameter
-# documentation, but not about the absence of documentation.
+# value. If set to NO, doxygen will only warn about wrong parameter
+# documentation, but not about the absence of documentation. If EXTRACT_ALL is
+# set to YES then this flag will automatically be disabled. See also
+# WARN_IF_INCOMPLETE_DOC.
 # The default value is: NO.
 
 WARN_NO_PARAMDOC       = YES
 
+# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when
+# a warning is encountered. If the WARN_AS_ERROR tag is set to FAIL_ON_WARNINGS
+# then doxygen will continue running as if WARN_AS_ERROR tag is set to NO, but
+# at the end of the doxygen process doxygen will return with a non-zero status.
+# Possible values are: NO, YES and FAIL_ON_WARNINGS.
+# The default value is: NO.
+
+WARN_AS_ERROR          = NO
+
 # The WARN_FORMAT tag determines the format of the warning messages that doxygen
 # can produce. The string should contain the $file, $line, and $text tags, which
 # will be replaced by the file and line number from which the warning originated
@@ -741,7 +847,10 @@ WARN_FORMAT            = "$file:$line: $text"
 
 # The WARN_LOGFILE tag can be used to specify a file to which warning and error
 # messages should be written. If left blank the output is written to standard
-# error (stderr).
+# error (stderr). In case the file specified cannot be opened for writing the
+# warning and error messages are written to standard error. When a file - is
+# specified the warning and error messages are written to standard output
+# (stdout).
 
 WARN_LOGFILE           =
 
@@ -752,31 +861,42 @@ WARN_LOGFILE           =
 # The INPUT tag is used to specify the files and/or directories that contain
 # documented source files. You may enter file names like myfile.cpp or
 # directories like /usr/src/myproject. Separate the files or directories with
-# spaces.
+# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING
 # Note: If this tag is empty the current directory is searched.
 
 INPUT                  = ../doc/main.dox \
+                         ../include/vision \
                          ../src
 
 # This tag can be used to specify the character encoding of the source files
 # that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
 # libiconv (or the iconv built into libc) for the transcoding. See the libiconv
-# documentation (see: http://www.gnu.org/software/libiconv) for the list of
-# possible encodings.
+# documentation (see:
+# https://www.gnu.org/software/libiconv/) for the list of possible encodings.
 # The default value is: UTF-8.
 
 INPUT_ENCODING         = UTF-8
 
 # If the value of the INPUT tag contains directories, you can use the
 # FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
-# *.h) to filter out the source-files in the directories. If left blank the
-# following patterns are tested:*.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii,
-# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp,
-# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown,
-# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf,
-# *.qsf, *.as and *.js.
-
-FILE_PATTERNS          = *.h \
+# *.h) to filter out the source-files in the directories.
+#
+# Note that for custom extensions or not directly supported extensions you also
+# need to set EXTENSION_MAPPING for the extension otherwise the files are not
+# read by doxygen.
+#
+# Note the list of default checked file patterns might differ from the list of
+# default file extension mappings.
+#
+# If left blank the following patterns are tested: *.c, *.cc, *.cxx, *.cpp,
+# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h,
+# *.hh, *.hxx, *.hpp, *.h++, *.l, *.cs, *.d, *.php, *.php4, *.php5, *.phtml,
+# *.inc, *.m, *.markdown, *.md, *.mm, *.dox (to be provided as doxygen C
+# comment), *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, *.f18, *.f, *.for, *.vhd,
+# *.vhdl, *.ucf, *.qsf and *.ice.
+
+FILE_PATTERNS          = *.c \
+                         *.h \
                          *.cpp
 
 # The RECURSIVE tag can be used to specify whether or not subdirectories should
@@ -792,13 +912,8 @@ RECURSIVE              = YES
 # Note that relative paths are relative to the directory from which doxygen is
 # run.
 
-EXCLUDE                = ../src/examples \
-                         ../src/test \
-                         ../src/internal \
-                         ../src/scilab \
-                         ../src/serialization \
-                         ../src/solver \
-                         ../src/data_association
+EXCLUDE                = ../src/old \
+                         ../src/examples
 
 # The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
 # directories that are symbolic links (a Unix file system feature) are excluded
@@ -847,7 +962,7 @@ EXCLUDE_PATTERNS       = parental.* \
 # (namespaces, classes, functions, etc.) that should be excluded from the
 # output. The symbol name can be a fully qualified name, a word, or if the
 # wildcard * is used, a substring. Examples: ANamespace, AClass,
-# AClass::ANamespace, ANamespace::*Test
+# ANamespace::AClass, ANamespace::*Test
 #
 # Note that the wildcards are matched against the file with absolute path, so to
 # exclude all test directories use the pattern */test/*
@@ -894,6 +1009,10 @@ IMAGE_PATH             = ../doc/images
 # Note that the filter must not add or remove lines; it is applied before the
 # code is scanned, but not when the output code is generated. If lines are added
 # or removed, the anchors will not be placed correctly.
+#
+# Note that for custom extensions or not directly supported extensions you also
+# need to set EXTENSION_MAPPING for the extension otherwise the files are not
+# properly processed by doxygen.
 
 INPUT_FILTER           =
 
@@ -903,11 +1022,15 @@ INPUT_FILTER           =
 # (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
 # filters are used. If the FILTER_PATTERNS tag is empty or if none of the
 # patterns match the file name, INPUT_FILTER is applied.
+#
+# Note that for custom extensions or not directly supported extensions you also
+# need to set EXTENSION_MAPPING for the extension otherwise the files are not
+# properly processed by doxygen.
 
 FILTER_PATTERNS        =
 
 # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
-# INPUT_FILTER ) will also be used to filter the input files that are used for
+# INPUT_FILTER) will also be used to filter the input files that are used for
 # producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
 # The default value is: NO.
 
@@ -955,7 +1078,7 @@ INLINE_SOURCES         = NO
 STRIP_CODE_COMMENTS    = YES
 
 # If the REFERENCED_BY_RELATION tag is set to YES then for each documented
-# function all documented functions referencing it will be listed.
+# entity all documented functions referencing it will be listed.
 # The default value is: NO.
 
 REFERENCED_BY_RELATION = YES
@@ -967,7 +1090,7 @@ REFERENCED_BY_RELATION = YES
 REFERENCES_RELATION    = YES
 
 # If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
-# to YES, then the hyperlinks from functions in REFERENCES_RELATION and
+# to YES then the hyperlinks from functions in REFERENCES_RELATION and
 # REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
 # link to the documentation.
 # The default value is: YES.
@@ -987,12 +1110,12 @@ SOURCE_TOOLTIPS        = YES
 # If the USE_HTAGS tag is set to YES then the references to source code will
 # point to the HTML generated by the htags(1) tool instead of doxygen built-in
 # source browser. The htags tool is part of GNU's global source tagging system
-# (see http://www.gnu.org/software/global/global.html). You will need version
+# (see https://www.gnu.org/software/global/global.html). You will need version
 # 4.8.6 or higher.
 #
 # To use it do the following:
 # - Install the latest version of global
-# - Enable SOURCE_BROWSER and USE_HTAGS in the config file
+# - Enable SOURCE_BROWSER and USE_HTAGS in the configuration file
 # - Make sure the INPUT points to the root of the source tree
 # - Run doxygen as normal
 #
@@ -1025,13 +1148,6 @@ VERBATIM_HEADERS       = YES
 
 ALPHABETICAL_INDEX     = YES
 
-# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
-# which the alphabetical index list will be split.
-# Minimum value: 1, maximum value: 20, default value: 5.
-# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
-
-COLS_IN_ALPHA_INDEX    = 5
-
 # In case all classes in a project start with a common prefix, all classes will
 # be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
 # can be used to specify a prefix (or a list of prefixes) that should be ignored
@@ -1044,7 +1160,7 @@ IGNORE_PREFIX          =
 # Configuration options related to the HTML output
 #---------------------------------------------------------------------------
 
-# If the GENERATE_HTML tag is set to YES doxygen will generate HTML output
+# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output
 # The default value is: YES.
 
 GENERATE_HTML          = YES
@@ -1110,10 +1226,10 @@ HTML_STYLESHEET        =
 # cascading style sheets that are included after the standard style sheets
 # created by doxygen. Using this option one can overrule certain style aspects.
 # This is preferred over using HTML_STYLESHEET since it does not replace the
-# standard style sheet and is therefor more robust against future updates.
+# standard style sheet and is therefore more robust against future updates.
 # Doxygen will copy the style sheet files to the output directory.
-# Note: The order of the extra stylesheet files is of importance (e.g. the last
-# stylesheet in the list overrules the setting of the previous ones in the
+# Note: The order of the extra style sheet files is of importance (e.g. the last
+# style sheet in the list overrules the setting of the previous ones in the
 # list). For an example see the documentation.
 # This tag requires that the tag GENERATE_HTML is set to YES.
 
@@ -1130,9 +1246,9 @@ HTML_EXTRA_STYLESHEET  =
 HTML_EXTRA_FILES       =
 
 # The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
-# will adjust the colors in the stylesheet and background images according to
-# this color. Hue is specified as an angle on a colorwheel, see
-# http://en.wikipedia.org/wiki/Hue for more information. For instance the value
+# will adjust the colors in the style sheet and background images according to
+# this color. Hue is specified as an angle on a color-wheel, see
+# https://en.wikipedia.org/wiki/Hue for more information. For instance the value
 # 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
 # purple, and 360 is red again.
 # Minimum value: 0, maximum value: 359, default value: 220.
@@ -1141,7 +1257,7 @@ HTML_EXTRA_FILES       =
 HTML_COLORSTYLE_HUE    = 220
 
 # The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
-# in the HTML output. For a value of 0 the output will use grayscales only. A
+# in the HTML output. For a value of 0 the output will use gray-scales only. A
 # value of 255 will produce the most vivid colors.
 # Minimum value: 0, maximum value: 255, default value: 100.
 # This tag requires that the tag GENERATE_HTML is set to YES.
@@ -1161,12 +1277,24 @@ HTML_COLORSTYLE_GAMMA  = 80
 
 # If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
 # page will contain the date and time when the page was generated. Setting this
-# to NO can help when comparing the output of multiple runs.
-# The default value is: YES.
+# to YES can help to show when doxygen was last run and thus if the
+# documentation is up to date.
+# The default value is: NO.
 # This tag requires that the tag GENERATE_HTML is set to YES.
 
 HTML_TIMESTAMP         = YES
 
+# If the HTML_DYNAMIC_MENUS tag is set to YES then the generated HTML
+# documentation will contain a main index with vertical navigation menus that
+# are dynamically created via JavaScript. If disabled, the navigation index will
+# consists of multiple levels of tabs that are statically embedded in every HTML
+# page. Disable this option to support browsers that do not have JavaScript,
+# like the Qt help browser.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_DYNAMIC_MENUS     = YES
+
 # If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
 # documentation will contain sections that can be hidden and shown after the
 # page has loaded.
@@ -1190,13 +1318,14 @@ HTML_INDEX_NUM_ENTRIES = 100
 
 # If the GENERATE_DOCSET tag is set to YES, additional index files will be
 # generated that can be used as input for Apple's Xcode 3 integrated development
-# environment (see: http://developer.apple.com/tools/xcode/), introduced with
-# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a
-# Makefile in the HTML output directory. Running make will produce the docset in
-# that directory and running make install will install the docset in
+# environment (see:
+# https://developer.apple.com/xcode/), introduced with OSX 10.5 (Leopard). To
+# create a documentation set, doxygen will generate a Makefile in the HTML
+# output directory. Running make will produce the docset in that directory and
+# running make install will install the docset in
 # ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
-# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
-# for more information.
+# startup. See https://developer.apple.com/library/archive/featuredarticles/Doxy
+# genXcode/_index.html for more information.
 # The default value is: NO.
 # This tag requires that the tag GENERATE_HTML is set to YES.
 
@@ -1210,6 +1339,13 @@ GENERATE_DOCSET        = NO
 
 DOCSET_FEEDNAME        = "Doxygen generated docs"
 
+# This tag determines the URL of the docset feed. A documentation feed provides
+# an umbrella under which multiple documentation sets from a single provider
+# (such as a company or product suite) can be grouped.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_FEEDURL         =
+
 # This tag specifies a string that should uniquely identify the documentation
 # set bundle. This should be a reverse domain-name style string, e.g.
 # com.mycompany.MyDocSet. Doxygen will append .docset to the name.
@@ -1235,8 +1371,12 @@ DOCSET_PUBLISHER_NAME  = Publisher
 # If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
 # additional HTML index files: index.hhp, index.hhc, and index.hhk. The
 # index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
-# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on
-# Windows.
+# on Windows. In the beginning of 2021 Microsoft took the original page, with
+# a.o. the download links, offline (the HTML help workshop was already many
+# years in maintenance mode). You can download the HTML help workshop from the web
+# archives at Installation executable (see:
+# http://web.archive.org/web/20160201063255/http://download.microsoft.com/downlo
+# ad/0/A/9/0A939EF6-E31C-430F-A3DF-DFAE7960D564/htmlhelp.exe).
 #
 # The HTML Help Workshop contains a compiler that can convert all HTML output
 # generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
@@ -1258,28 +1398,28 @@ GENERATE_HTMLHELP      = NO
 CHM_FILE               =
 
 # The HHC_LOCATION tag can be used to specify the location (absolute path
-# including file name) of the HTML help compiler ( hhc.exe). If non-empty
+# including file name) of the HTML help compiler (hhc.exe). If non-empty,
 # doxygen will try to run the HTML help compiler on the generated index.hhp.
 # The file has to be specified with full path.
 # This tag requires that the tag GENERATE_HTMLHELP is set to YES.
 
 HHC_LOCATION           =
 
-# The GENERATE_CHI flag controls if a separate .chi index file is generated (
-# YES) or that it should be included in the master .chm file ( NO).
+# The GENERATE_CHI flag controls if a separate .chi index file is generated
+# (YES) or that it should be included in the main .chm file (NO).
 # The default value is: NO.
 # This tag requires that the tag GENERATE_HTMLHELP is set to YES.
 
 GENERATE_CHI           = NO
 
-# The CHM_INDEX_ENCODING is used to encode HtmlHelp index ( hhk), content ( hhc)
+# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc)
 # and project file content.
 # This tag requires that the tag GENERATE_HTMLHELP is set to YES.
 
 CHM_INDEX_ENCODING     =
 
-# The BINARY_TOC flag controls whether a binary table of contents is generated (
-# YES) or a normal table of contents ( NO) in the .chm file. Furthermore it
+# The BINARY_TOC flag controls whether a binary table of contents is generated
+# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it
 # enables the Previous and Next buttons.
 # The default value is: NO.
 # This tag requires that the tag GENERATE_HTMLHELP is set to YES.
@@ -1311,7 +1451,8 @@ QCH_FILE               =
 
 # The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
 # Project output. For more information please see Qt Help Project / Namespace
-# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace).
+# (see:
+# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#namespace).
 # The default value is: org.doxygen.Project.
 # This tag requires that the tag GENERATE_QHP is set to YES.
 
@@ -1319,8 +1460,8 @@ QHP_NAMESPACE          = org.doxygen.Project
 
 # The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
 # Help Project output. For more information please see Qt Help Project / Virtual
-# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual-
-# folders).
+# Folders (see:
+# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#virtual-folders).
 # The default value is: doc.
 # This tag requires that the tag GENERATE_QHP is set to YES.
 
@@ -1328,30 +1469,30 @@ QHP_VIRTUAL_FOLDER     = doc
 
 # If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
 # filter to add. For more information please see Qt Help Project / Custom
-# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
-# filters).
+# Filters (see:
+# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters).
 # This tag requires that the tag GENERATE_QHP is set to YES.
 
 QHP_CUST_FILTER_NAME   =
 
 # The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
 # custom filter to add. For more information please see Qt Help Project / Custom
-# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
-# filters).
+# Filters (see:
+# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters).
 # This tag requires that the tag GENERATE_QHP is set to YES.
 
 QHP_CUST_FILTER_ATTRS  =
 
 # The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
 # project's filter section matches. Qt Help Project / Filter Attributes (see:
-# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes).
+# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#filter-attributes).
 # This tag requires that the tag GENERATE_QHP is set to YES.
 
 QHP_SECT_FILTER_ATTRS  =
 
-# The QHG_LOCATION tag can be used to specify the location of Qt's
-# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the
-# generated .qhp file.
+# The QHG_LOCATION tag can be used to specify the location (absolute path
+# including file name) of Qt's qhelpgenerator. If non-empty doxygen will try to
+# run qhelpgenerator on the generated .qhp file.
 # This tag requires that the tag GENERATE_QHP is set to YES.
 
 QHG_LOCATION           =
@@ -1393,17 +1534,29 @@ DISABLE_INDEX          = NO
 # index structure (just like the one that is generated for HTML Help). For this
 # to work a browser that supports JavaScript, DHTML, CSS and frames is required
 # (i.e. any modern browser). Windows users are probably better off using the
-# HTML help feature. Via custom stylesheets (see HTML_EXTRA_STYLESHEET) one can
-# further fine-tune the look of the index. As an example, the default style
-# sheet generated by doxygen has an example that shows how to put an image at
-# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
-# the same information as the tab index, you could consider setting
-# DISABLE_INDEX to YES when enabling this option.
+# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can
+# further fine tune the look of the index (see "Fine-tuning the output"). As an
+# example, the default style sheet generated by doxygen has an example that
+# shows how to put an image at the root of the tree instead of the PROJECT_NAME.
+# Since the tree basically has the same information as the tab index, you could
+# consider setting DISABLE_INDEX to YES when enabling this option.
 # The default value is: NO.
 # This tag requires that the tag GENERATE_HTML is set to YES.
 
 GENERATE_TREEVIEW      = NO
 
+# When both GENERATE_TREEVIEW and DISABLE_INDEX are set to YES, then the
+# FULL_SIDEBAR option determines if the side bar is limited to only the treeview
+# area (value NO) or if it should extend to the full height of the window (value
+# YES). Setting this to YES gives a layout similar to
+# https://docs.readthedocs.io with more room for contents, but less room for the
+# project logo, title, and description. If either GENERATE_TREEVIEW or
+# DISABLE_INDEX is set to NO, this option has no effect.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FULL_SIDEBAR           = NO
+
 # The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
 # doxygen will group on one line in the generated HTML documentation.
 #
@@ -1421,13 +1574,31 @@ ENUM_VALUES_PER_LINE   = 4
 
 TREEVIEW_WIDTH         = 250
 
-# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open links to
+# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to
 # external symbols imported via tag files in a separate window.
 # The default value is: NO.
 # This tag requires that the tag GENERATE_HTML is set to YES.
 
 EXT_LINKS_IN_WINDOW    = NO
 
+# If the OBFUSCATE_EMAILS tag is set to YES, doxygen will obfuscate email
+# addresses.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+OBFUSCATE_EMAILS       = YES
+
+# If the HTML_FORMULA_FORMAT option is set to svg, doxygen will use the pdf2svg
+# tool (see https://github.com/dawbarton/pdf2svg) or inkscape (see
+# https://inkscape.org) to generate formulas as SVG images instead of PNGs for
+# the HTML output. These images will generally look nicer at scaled resolutions.
+# Possible values are: png (the default) and svg (looks nicer but requires the
+# pdf2svg or inkscape tool).
+# The default value is: png.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FORMULA_FORMAT    = png
+
 # Use this tag to change the font size of LaTeX formulas included as images in
 # the HTML documentation. When you change the font size after a successful
 # doxygen run you need to manually remove any form_*.png images from the HTML
@@ -1437,7 +1608,7 @@ EXT_LINKS_IN_WINDOW    = NO
 
 FORMULA_FONTSIZE       = 10
 
-# Use the FORMULA_TRANPARENT tag to determine whether or not the images
+# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
 # generated for formulas are transparent PNGs. Transparent PNGs are not
 # supported properly for IE 6.0, but are supported on all modern browsers.
 #
@@ -1448,9 +1619,15 @@ FORMULA_FONTSIZE       = 10
 
 FORMULA_TRANSPARENT    = YES
 
+# The FORMULA_MACROFILE can contain LaTeX \newcommand and \renewcommand commands
+# to create new LaTeX commands to be used in formulas as building blocks. See
+# the section "Including formulas" for details.
+
+FORMULA_MACROFILE      =
+
 # Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
-# http://www.mathjax.org) which uses client side Javascript for the rendering
-# instead of using prerendered bitmaps. Use this if you do not have LaTeX
+# https://www.mathjax.org) which uses client side JavaScript for the rendering
+# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX
 # installed or if you want to formulas look prettier in the HTML output. When
 # enabled you may also need to install MathJax separately and configure the path
 # to it using the MATHJAX_RELPATH option.
@@ -1459,11 +1636,29 @@ FORMULA_TRANSPARENT    = YES
 
 USE_MATHJAX            = NO
 
+# With MATHJAX_VERSION it is possible to specify the MathJax version to be used.
+# Note that the different versions of MathJax have different requirements with
+# regards to the different settings, so it is possible that also other MathJax
+# settings have to be changed when switching between the different MathJax
+# versions.
+# Possible values are: MathJax_2 and MathJax_3.
+# The default value is: MathJax_2.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_VERSION        = MathJax_2
+
 # When MathJax is enabled you can set the default output format to be used for
-# the MathJax output. See the MathJax site (see:
-# http://docs.mathjax.org/en/latest/output.html) for more details.
+# the MathJax output. For more details about the output format see MathJax
+# version 2 (see:
+# http://docs.mathjax.org/en/v2.7-latest/output.html) and MathJax version 3
+# (see:
+# http://docs.mathjax.org/en/latest/web/components/output.html).
 # Possible values are: HTML-CSS (which is slower, but has the best
-# compatibility), NativeMML (i.e. MathML) and SVG.
+# compatibility. This is the name for Mathjax version 2, for MathJax version 3
+# this will be translated into chtml), NativeMML (i.e. MathML. Only supported
+# for MathJax 2. For MathJax version 3 chtml will be used instead.), chtml (This
+# is the name for Mathjax version 3, for MathJax version 2 this will be
+# translated into HTML-CSS) and SVG.
 # The default value is: HTML-CSS.
 # This tag requires that the tag USE_MATHJAX is set to YES.
 
@@ -1476,22 +1671,29 @@ MATHJAX_FORMAT         = HTML-CSS
 # MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
 # Content Delivery Network so you can quickly see the result without installing
 # MathJax. However, it is strongly recommended to install a local copy of
-# MathJax from http://www.mathjax.org before deployment.
-# The default value is: http://cdn.mathjax.org/mathjax/latest.
+# MathJax from https://www.mathjax.org before deployment. The default value is:
+# - in case of MathJax version 2: https://cdn.jsdelivr.net/npm/mathjax@2
+# - in case of MathJax version 3: https://cdn.jsdelivr.net/npm/mathjax@3
 # This tag requires that the tag USE_MATHJAX is set to YES.
 
 MATHJAX_RELPATH        = http://cdn.mathjax.org/mathjax/latest
 
 # The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
 # extension names that should be enabled during MathJax rendering. For example
+# for MathJax version 2 (see https://docs.mathjax.org/en/v2.7-latest/tex.html
+# #tex-and-latex-extensions):
 # MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
+# For example for MathJax version 3 (see
+# http://docs.mathjax.org/en/latest/input/tex/extensions/index.html):
+# MATHJAX_EXTENSIONS = ams
 # This tag requires that the tag USE_MATHJAX is set to YES.
 
 MATHJAX_EXTENSIONS     =
 
 # The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
 # of code that will be used on startup of the MathJax code. See the MathJax site
-# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
+# (see:
+# http://docs.mathjax.org/en/v2.7-latest/output.html) for more details. For an
 # example see the documentation.
 # This tag requires that the tag USE_MATHJAX is set to YES.
 
@@ -1519,7 +1721,7 @@ MATHJAX_CODEFILE       =
 SEARCHENGINE           = YES
 
 # When the SERVER_BASED_SEARCH tag is enabled the search engine will be
-# implemented using a web server instead of a web client using Javascript. There
+# implemented using a web server instead of a web client using JavaScript. There
 # are two flavors of web server based searching depending on the EXTERNAL_SEARCH
 # setting. When disabled, doxygen will generate a PHP script for searching and
 # an index file used by the script. When EXTERNAL_SEARCH is enabled the indexing
@@ -1536,9 +1738,10 @@ SERVER_BASED_SEARCH    = NO
 # external search engine pointed to by the SEARCHENGINE_URL option to obtain the
 # search results.
 #
-# Doxygen ships with an example indexer ( doxyindexer) and search engine
+# Doxygen ships with an example indexer (doxyindexer) and search engine
 # (doxysearch.cgi) which are based on the open source search engine library
-# Xapian (see: http://xapian.org/).
+# Xapian (see:
+# https://xapian.org/).
 #
 # See the section "External Indexing and Searching" for details.
 # The default value is: NO.
@@ -1549,10 +1752,11 @@ EXTERNAL_SEARCH        = NO
 # The SEARCHENGINE_URL should point to a search engine hosted by a web server
 # which will return the search results when EXTERNAL_SEARCH is enabled.
 #
-# Doxygen ships with an example indexer ( doxyindexer) and search engine
+# Doxygen ships with an example indexer (doxyindexer) and search engine
 # (doxysearch.cgi) which are based on the open source search engine library
-# Xapian (see: http://xapian.org/). See the section "External Indexing and
-# Searching" for details.
+# Xapian (see:
+# https://xapian.org/). See the section "External Indexing and Searching" for
+# details.
 # This tag requires that the tag SEARCHENGINE is set to YES.
 
 SEARCHENGINE_URL       =
@@ -1587,7 +1791,7 @@ EXTRA_SEARCH_MAPPINGS  =
 # Configuration options related to the LaTeX output
 #---------------------------------------------------------------------------
 
-# If the GENERATE_LATEX tag is set to YES doxygen will generate LaTeX output.
+# If the GENERATE_LATEX tag is set to YES, doxygen will generate LaTeX output.
 # The default value is: YES.
 
 GENERATE_LATEX         = NO
@@ -1603,22 +1807,36 @@ LATEX_OUTPUT           = latex
 # The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
 # invoked.
 #
-# Note that when enabling USE_PDFLATEX this option is only used for generating
-# bitmaps for formulas in the HTML output, but not in the Makefile that is
-# written to the output directory.
-# The default file is: latex.
+# Note that when not enabling USE_PDFLATEX the default is latex when enabling
+# USE_PDFLATEX the default is pdflatex and when in the latter case latex is
+# chosen this is overwritten by pdflatex. For specific output languages the
+# default can have been set differently, this depends on the implementation of
+# the output language.
 # This tag requires that the tag GENERATE_LATEX is set to YES.
 
 LATEX_CMD_NAME         = latex
 
 # The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate
 # index for LaTeX.
+# Note: This tag is used in the Makefile / make.bat.
+# See also: LATEX_MAKEINDEX_CMD for the part in the generated output file
+# (.tex).
 # The default file is: makeindex.
 # This tag requires that the tag GENERATE_LATEX is set to YES.
 
 MAKEINDEX_CMD_NAME     = makeindex
 
-# If the COMPACT_LATEX tag is set to YES doxygen generates more compact LaTeX
+# The LATEX_MAKEINDEX_CMD tag can be used to specify the command name to
+# generate index for LaTeX. In case there is no backslash (\) as first character
+# it will be automatically added in the LaTeX code.
+# Note: This tag is used in the generated output file (.tex).
+# See also: MAKEINDEX_CMD_NAME for the part in the Makefile / make.bat.
+# The default value is: makeindex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_MAKEINDEX_CMD    = makeindex
+
+# If the COMPACT_LATEX tag is set to YES, doxygen generates more compact LaTeX
 # documents. This may be useful for small projects and may help to save some
 # trees in general.
 # The default value is: NO.
@@ -1636,41 +1854,57 @@ COMPACT_LATEX          = NO
 PAPER_TYPE             = a4
 
 # The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
-# that should be included in the LaTeX output. To get the times font for
-# instance you can specify
-# EXTRA_PACKAGES=times
+# that should be included in the LaTeX output. The package can be specified just
+# by its name or with the correct syntax as to be used with the LaTeX
+# \usepackage command. To get the times font for instance you can specify :
+# EXTRA_PACKAGES=times or EXTRA_PACKAGES={times}
+# To use the option intlimits with the amsmath package you can specify:
+# EXTRA_PACKAGES=[intlimits]{amsmath}
 # If left blank no extra packages will be included.
 # This tag requires that the tag GENERATE_LATEX is set to YES.
 
 EXTRA_PACKAGES         =
 
-# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
-# generated LaTeX document. The header should contain everything until the first
-# chapter. If it is left blank doxygen will generate a standard header. See
-# section "Doxygen usage" for information on how to let doxygen write the
-# default header to a separate file.
+# The LATEX_HEADER tag can be used to specify a user-defined LaTeX header for
+# the generated LaTeX document. The header should contain everything until the
+# first chapter. If it is left blank doxygen will generate a standard header. It
+# is highly recommended to start with a default header using
+# doxygen -w latex new_header.tex new_footer.tex new_stylesheet.sty
+# and then modify the file new_header.tex. See also section "Doxygen usage" for
+# information on how to generate the default header that doxygen normally uses.
 #
-# Note: Only use a user-defined header if you know what you are doing! The
-# following commands have a special meaning inside the header: $title,
-# $datetime, $date, $doxygenversion, $projectname, $projectnumber,
-# $projectbrief, $projectlogo. Doxygen will replace $title with the empy string,
-# for the replacement values of the other commands the user is refered to
-# HTML_HEADER.
+# Note: Only use a user-defined header if you know what you are doing!
+# Note: The header is subject to change so you typically have to regenerate the
+# default header when upgrading to a newer version of doxygen. The following
+# commands have a special meaning inside the header (and footer): For a
+# description of the possible markers and block names see the documentation.
 # This tag requires that the tag GENERATE_LATEX is set to YES.
 
 LATEX_HEADER           =
 
-# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the
-# generated LaTeX document. The footer should contain everything after the last
-# chapter. If it is left blank doxygen will generate a standard footer. See
+# The LATEX_FOOTER tag can be used to specify a user-defined LaTeX footer for
+# the generated LaTeX document. The footer should contain everything after the
+# last chapter. If it is left blank doxygen will generate a standard footer. See
 # LATEX_HEADER for more information on how to generate a default footer and what
-# special commands can be used inside the footer.
-#
-# Note: Only use a user-defined footer if you know what you are doing!
+# special commands can be used inside the footer. See also section "Doxygen
+# usage" for information on how to generate the default footer that doxygen
+# normally uses. Note: Only use a user-defined footer if you know what you are
+# doing!
 # This tag requires that the tag GENERATE_LATEX is set to YES.
 
 LATEX_FOOTER           =
 
+# The LATEX_EXTRA_STYLESHEET tag can be used to specify additional user-defined
+# LaTeX style sheets that are included after the standard style sheets created
+# by doxygen. Using this option one can overrule certain style aspects. Doxygen
+# will copy the style sheet files to the output directory.
+# Note: The order of the extra style sheet files is of importance (e.g. the last
+# style sheet in the list overrules the setting of the previous ones in the
+# list).
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_EXTRA_STYLESHEET =
+
 # The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or
 # other source files which should be copied to the LATEX_OUTPUT output
 # directory. Note that the files will be copied as-is; there are no commands or
@@ -1688,9 +1922,11 @@ LATEX_EXTRA_FILES      =
 
 PDF_HYPERLINKS         = YES
 
-# If the USE_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate
-# the PDF file directly from the LaTeX files. Set this option to YES to get a
-# higher quality PDF documentation.
+# If the USE_PDFLATEX tag is set to YES, doxygen will use the engine as
+# specified with LATEX_CMD_NAME to generate the PDF file directly from the LaTeX
+# files. Set this option to YES, to get a higher quality PDF documentation.
+#
+# See also section LATEX_CMD_NAME for selecting the engine.
 # The default value is: YES.
 # This tag requires that the tag GENERATE_LATEX is set to YES.
 
@@ -1698,8 +1934,7 @@ USE_PDFLATEX           = NO
 
 # If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode
 # command to the generated LaTeX files. This will instruct LaTeX to keep running
-# if errors occur, instead of asking the user for help. This option is also used
-# when generating formulas in HTML.
+# if errors occur, instead of asking the user for help.
 # The default value is: NO.
 # This tag requires that the tag GENERATE_LATEX is set to YES.
 
@@ -1712,29 +1947,35 @@ LATEX_BATCHMODE        = NO
 
 LATEX_HIDE_INDICES     = NO
 
-# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source
-# code with syntax highlighting in the LaTeX output.
-#
-# Note that which sources are shown also depends on other settings such as
-# SOURCE_BROWSER.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_SOURCE_CODE      = NO
-
 # The LATEX_BIB_STYLE tag can be used to specify the style to use for the
 # bibliography, e.g. plainnat, or ieeetr. See
-# http://en.wikipedia.org/wiki/BibTeX and \cite for more info.
+# https://en.wikipedia.org/wiki/BibTeX and \cite for more info.
 # The default value is: plain.
 # This tag requires that the tag GENERATE_LATEX is set to YES.
 
 LATEX_BIB_STYLE        = plain
 
+# If the LATEX_TIMESTAMP tag is set to YES then the footer of each generated
+# page will contain the date and time when the page was generated. Setting this
+# to NO can help when comparing the output of multiple runs.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_TIMESTAMP        = NO
+
+# The LATEX_EMOJI_DIRECTORY tag is used to specify the (relative or absolute)
+# path from which the emoji images will be read. If a relative path is entered,
+# it will be relative to the LATEX_OUTPUT directory. If left blank the
+# LATEX_OUTPUT directory will be used.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_EMOJI_DIRECTORY  =
+
 #---------------------------------------------------------------------------
 # Configuration options related to the RTF output
 #---------------------------------------------------------------------------
 
-# If the GENERATE_RTF tag is set to YES doxygen will generate RTF output. The
+# If the GENERATE_RTF tag is set to YES, doxygen will generate RTF output. The
 # RTF output is optimized for Word 97 and may not look too pretty with other RTF
 # readers/editors.
 # The default value is: NO.
@@ -1749,7 +1990,7 @@ GENERATE_RTF           = NO
 
 RTF_OUTPUT             = rtf
 
-# If the COMPACT_RTF tag is set to YES doxygen generates more compact RTF
+# If the COMPACT_RTF tag is set to YES, doxygen generates more compact RTF
 # documents. This may be useful for small projects and may help to save some
 # trees in general.
 # The default value is: NO.
@@ -1769,9 +2010,9 @@ COMPACT_RTF            = NO
 
 RTF_HYPERLINKS         = NO
 
-# Load stylesheet definitions from file. Syntax is similar to doxygen's config
-# file, i.e. a series of assignments. You only have to provide replacements,
-# missing definitions are set to their default value.
+# Load stylesheet definitions from file. Syntax is similar to doxygen's
+# configuration file, i.e. a series of assignments. You only have to provide
+# replacements, missing definitions are set to their default value.
 #
 # See also section "Doxygen usage" for information on how to generate the
 # default style sheet that doxygen normally uses.
@@ -1780,8 +2021,8 @@ RTF_HYPERLINKS         = NO
 RTF_STYLESHEET_FILE    =
 
 # Set optional variables used in the generation of an RTF document. Syntax is
-# similar to doxygen's config file. A template extensions file can be generated
-# using doxygen -e rtf extensionFile.
+# similar to doxygen's configuration file. A template extensions file can be
+# generated using doxygen -e rtf extensionFile.
 # This tag requires that the tag GENERATE_RTF is set to YES.
 
 RTF_EXTENSIONS_FILE    =
@@ -1790,7 +2031,7 @@ RTF_EXTENSIONS_FILE    =
 # Configuration options related to the man page output
 #---------------------------------------------------------------------------
 
-# If the GENERATE_MAN tag is set to YES doxygen will generate man pages for
+# If the GENERATE_MAN tag is set to YES, doxygen will generate man pages for
 # classes and files.
 # The default value is: NO.
 
@@ -1834,7 +2075,7 @@ MAN_LINKS              = NO
 # Configuration options related to the XML output
 #---------------------------------------------------------------------------
 
-# If the GENERATE_XML tag is set to YES doxygen will generate an XML file that
+# If the GENERATE_XML tag is set to YES, doxygen will generate an XML file that
 # captures the structure of the code including all documentation.
 # The default value is: NO.
 
@@ -1848,7 +2089,7 @@ GENERATE_XML           = NO
 
 XML_OUTPUT             = xml
 
-# If the XML_PROGRAMLISTING tag is set to YES doxygen will dump the program
+# If the XML_PROGRAMLISTING tag is set to YES, doxygen will dump the program
 # listings (including syntax highlighting and cross-referencing information) to
 # the XML output. Note that enabling this will significantly increase the size
 # of the XML output.
@@ -1857,11 +2098,18 @@ XML_OUTPUT             = xml
 
 XML_PROGRAMLISTING     = YES
 
+# If the XML_NS_MEMB_FILE_SCOPE tag is set to YES, doxygen will include
+# namespace members in file scope as well, matching the HTML output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_NS_MEMB_FILE_SCOPE = NO
+
 #---------------------------------------------------------------------------
 # Configuration options related to the DOCBOOK output
 #---------------------------------------------------------------------------
 
-# If the GENERATE_DOCBOOK tag is set to YES doxygen will generate Docbook files
+# If the GENERATE_DOCBOOK tag is set to YES, doxygen will generate Docbook files
 # that can be used to generate PDF.
 # The default value is: NO.
 
@@ -1875,23 +2123,14 @@ GENERATE_DOCBOOK       = NO
 
 DOCBOOK_OUTPUT         = docbook
 
-# If the DOCBOOK_PROGRAMLISTING tag is set to YES doxygen will include the
-# program listings (including syntax highlighting and cross-referencing
-# information) to the DOCBOOK output. Note that enabling this will significantly
-# increase the size of the DOCBOOK output.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
-
-DOCBOOK_PROGRAMLISTING = NO
-
 #---------------------------------------------------------------------------
 # Configuration options for the AutoGen Definitions output
 #---------------------------------------------------------------------------
 
-# If the GENERATE_AUTOGEN_DEF tag is set to YES doxygen will generate an AutoGen
-# Definitions (see http://autogen.sf.net) file that captures the structure of
-# the code including all documentation. Note that this feature is still
-# experimental and incomplete at the moment.
+# If the GENERATE_AUTOGEN_DEF tag is set to YES, doxygen will generate an
+# AutoGen Definitions (see http://autogen.sourceforge.net/) file that captures
+# the structure of the code including all documentation. Note that this feature
+# is still experimental and incomplete at the moment.
 # The default value is: NO.
 
 GENERATE_AUTOGEN_DEF   = NO
@@ -1900,7 +2139,7 @@ GENERATE_AUTOGEN_DEF   = NO
 # Configuration options related to the Perl module output
 #---------------------------------------------------------------------------
 
-# If the GENERATE_PERLMOD tag is set to YES doxygen will generate a Perl module
+# If the GENERATE_PERLMOD tag is set to YES, doxygen will generate a Perl module
 # file that captures the structure of the code including all documentation.
 #
 # Note that this feature is still experimental and incomplete at the moment.
@@ -1908,7 +2147,7 @@ GENERATE_AUTOGEN_DEF   = NO
 
 GENERATE_PERLMOD       = NO
 
-# If the PERLMOD_LATEX tag is set to YES doxygen will generate the necessary
+# If the PERLMOD_LATEX tag is set to YES, doxygen will generate the necessary
 # Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI
 # output from the Perl module output.
 # The default value is: NO.
@@ -1916,9 +2155,9 @@ GENERATE_PERLMOD       = NO
 
 PERLMOD_LATEX          = NO
 
-# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be nicely
+# If the PERLMOD_PRETTY tag is set to YES, the Perl module output will be nicely
 # formatted so it can be parsed by a human reader. This is useful if you want to
-# understand what is going on. On the other hand, if this tag is set to NO the
+# understand what is going on. On the other hand, if this tag is set to NO, the
 # size of the Perl module output will be much smaller and Perl will parse it
 # just the same.
 # The default value is: YES.
@@ -1938,14 +2177,14 @@ PERLMOD_MAKEVAR_PREFIX =
 # Configuration options related to the preprocessor
 #---------------------------------------------------------------------------
 
-# If the ENABLE_PREPROCESSING tag is set to YES doxygen will evaluate all
+# If the ENABLE_PREPROCESSING tag is set to YES, doxygen will evaluate all
 # C-preprocessor directives found in the sources and include files.
 # The default value is: YES.
 
 ENABLE_PREPROCESSING   = YES
 
-# If the MACRO_EXPANSION tag is set to YES doxygen will expand all macro names
-# in the source code. If set to NO only conditional compilation will be
+# If the MACRO_EXPANSION tag is set to YES, doxygen will expand all macro names
+# in the source code. If set to NO, only conditional compilation will be
 # performed. Macro expansion can be done in a controlled way by setting
 # EXPAND_ONLY_PREDEF to YES.
 # The default value is: NO.
@@ -1961,7 +2200,7 @@ MACRO_EXPANSION        = NO
 
 EXPAND_ONLY_PREDEF     = NO
 
-# If the SEARCH_INCLUDES tag is set to YES the includes files in the
+# If the SEARCH_INCLUDES tag is set to YES, the include files in the
 # INCLUDE_PATH will be searched if a #include is found.
 # The default value is: YES.
 # This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
@@ -2037,54 +2276,31 @@ TAGFILES               =
 
 GENERATE_TAGFILE       =
 
-# If the ALLEXTERNALS tag is set to YES all external class will be listed in the
-# class index. If set to NO only the inherited external classes will be listed.
+# If the ALLEXTERNALS tag is set to YES, all external classes will be listed in
+# the class index. If set to NO, only the inherited external classes will be
+# listed.
 # The default value is: NO.
 
 ALLEXTERNALS           = NO
 
-# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed in
-# the modules index. If set to NO, only the current project's groups will be
+# If the EXTERNAL_GROUPS tag is set to YES, all external groups will be listed
+# in the modules index. If set to NO, only the current project's groups will be
 # listed.
 # The default value is: YES.
 
 EXTERNAL_GROUPS        = YES
 
-# If the EXTERNAL_PAGES tag is set to YES all external pages will be listed in
+# If the EXTERNAL_PAGES tag is set to YES, all external pages will be listed in
 # the related pages index. If set to NO, only the current project's pages will
 # be listed.
 # The default value is: YES.
 
 EXTERNAL_PAGES         = YES
 
-# The PERL_PATH should be the absolute path and name of the perl script
-# interpreter (i.e. the result of 'which perl').
-# The default file (with absolute path) is: /usr/bin/perl.
-
-PERL_PATH              = /usr/bin/perl
-
 #---------------------------------------------------------------------------
 # Configuration options related to the dot tool
 #---------------------------------------------------------------------------
 
-# If the CLASS_DIAGRAMS tag is set to YES doxygen will generate a class diagram
-# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to
-# NO turns the diagrams off. Note that this option also works with HAVE_DOT
-# disabled, but it is recommended to install and use dot, since it yields more
-# powerful graphs.
-# The default value is: YES.
-
-CLASS_DIAGRAMS         = YES
-
-# You can define message sequence charts within doxygen comments using the \msc
-# command. Doxygen will then run the mscgen tool (see:
-# http://www.mcternan.me.uk/mscgen/)) to produce the chart and insert it in the
-# documentation. The MSCGEN_PATH tag allows you to specify the directory where
-# the mscgen tool resides. If left empty the tool is assumed to be found in the
-# default search path.
-
-MSCGEN_PATH            =
-
 # You can include diagrams made with dia in doxygen documentation. Doxygen will
 # then run dia to produce the diagram and insert it in the documentation. The
 # DIA_PATH tag allows you to specify the directory where the dia binary resides.
@@ -2092,7 +2308,7 @@ MSCGEN_PATH            =
 
 DIA_PATH               =
 
-# If set to YES, the inheritance and collaboration graphs will hide inheritance
+# If set to YES the inheritance and collaboration graphs will hide inheritance
 # and usage relations if the target is undocumented or is not a class.
 # The default value is: YES.
 
@@ -2141,11 +2357,14 @@ DOT_FONTSIZE           = 10
 
 DOT_FONTPATH           =
 
-# If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for
-# each documented class showing the direct and indirect inheritance relations.
-# Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO.
+# If the CLASS_GRAPH tag is set to YES (or GRAPH) then doxygen will generate a
+# graph for each documented class showing the direct and indirect inheritance
+# relations. In case HAVE_DOT is set as well dot will be used to draw the graph,
+# otherwise the built-in generator will be used. If the CLASS_GRAPH tag is set
+# to TEXT the direct and indirect inheritance relations will be shown as texts /
+# links.
+# Possible values are: NO, YES, TEXT and GRAPH.
 # The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
 
 CLASS_GRAPH            = YES
 
@@ -2165,7 +2384,7 @@ COLLABORATION_GRAPH    = YES
 
 GROUP_GRAPHS           = YES
 
-# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# If the UML_LOOK tag is set to YES, doxygen will generate inheritance and
 # collaboration diagrams in a style similar to the OMG's Unified Modeling
 # Language.
 # The default value is: NO.
@@ -2182,10 +2401,32 @@ UML_LOOK               = NO
 # but if the number exceeds 15, the total amount of fields shown is limited to
 # 10.
 # Minimum value: 0, maximum value: 100, default value: 10.
-# This tag requires that the tag HAVE_DOT is set to YES.
+# This tag requires that the tag UML_LOOK is set to YES.
 
 UML_LIMIT_NUM_FIELDS   = 10
 
+# If the DOT_UML_DETAILS tag is set to NO, doxygen will show attributes and
+# methods without types and arguments in the UML graphs. If the DOT_UML_DETAILS
+# tag is set to YES, doxygen will add type and arguments for attributes and
+# methods in the UML graphs. If the DOT_UML_DETAILS tag is set to NONE, doxygen
+# will not generate fields with class member information in the UML graphs. The
+# class diagrams will look similar to the default class diagrams but using UML
+# notation for the relationships.
+# Possible values are: NO, YES and NONE.
+# The default value is: NO.
+# This tag requires that the tag UML_LOOK is set to YES.
+
+DOT_UML_DETAILS        = NO
+
+# The DOT_WRAP_THRESHOLD tag can be used to set the maximum number of characters
+# to display on a single line. If the actual line length exceeds this threshold
+# significantly it will be wrapped across multiple lines. Some heuristics are
+# applied to avoid ugly line breaks.
+# Minimum value: 0, maximum value: 1000, default value: 17.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_WRAP_THRESHOLD     = 17
+
 # If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and
 # collaboration graphs will show the relations between templates and their
 # instances.
@@ -2217,7 +2458,8 @@ INCLUDED_BY_GRAPH      = YES
 #
 # Note that enabling this option will significantly increase the time of a run.
 # So in most cases it will be better to enable call graphs for selected
-# functions only using the \callgraph command.
+# functions only using the \callgraph command. Disabling a call graph can be
+# accomplished by means of the command \hidecallgraph.
 # The default value is: NO.
 # This tag requires that the tag HAVE_DOT is set to YES.
 
@@ -2228,7 +2470,8 @@ CALL_GRAPH             = NO
 #
 # Note that enabling this option will significantly increase the time of a run.
 # So in most cases it will be better to enable caller graphs for selected
-# functions only using the \callergraph command.
+# functions only using the \callergraph command. Disabling a caller graph can be
+# accomplished by means of the command \hidecallergraph.
 # The default value is: NO.
 # This tag requires that the tag HAVE_DOT is set to YES.
 
@@ -2250,12 +2493,23 @@ GRAPHICAL_HIERARCHY    = YES
 
 DIRECTORY_GRAPH        = NO
 
+# The DIR_GRAPH_MAX_DEPTH tag can be used to limit the maximum number of levels
+# of child directories generated in directory dependency graphs by dot.
+# Minimum value: 1, maximum value: 25, default value: 1.
+# This tag requires that the tag DIRECTORY_GRAPH is set to YES.
+
+DIR_GRAPH_MAX_DEPTH    = 1
+
 # The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
-# generated by dot.
+# generated by dot. For an explanation of the image formats see the section
+# output formats in the documentation of the dot tool (Graphviz (see:
+# http://www.graphviz.org/)).
 # Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
 # to make the SVG files visible in IE 9+ (other browsers do not have this
 # requirement).
-# Possible values are: png, jpg, gif and svg.
+# Possible values are: png, jpg, gif, svg, png:gd, png:gd:gd, png:cairo,
+# png:cairo:gd, png:cairo:cairo, png:cairo:gdiplus, png:gdiplus and
+# png:gdiplus:gdiplus.
 # The default value is: png.
 # This tag requires that the tag HAVE_DOT is set to YES.
 
@@ -2299,14 +2553,23 @@ MSCFILE_DIRS           =
 DIAFILE_DIRS           =
 
 # When using plantuml, the PLANTUML_JAR_PATH tag should be used to specify the
-# path where java can find the plantuml.jar file. If left blank, it is assumed
-# PlantUML is not used or called during a preprocessing step. Doxygen will
-# generate a warning when it encounters a \startuml command in this case and
-# will not generate output for the diagram.
-# This tag requires that the tag HAVE_DOT is set to YES.
+# path where java can find the plantuml.jar file or to the filename of jar file
+# to be used. If left blank, it is assumed PlantUML is not used or called during
+# a preprocessing step. Doxygen will generate a warning when it encounters a
+# \startuml command in this case and will not generate output for the diagram.
 
 PLANTUML_JAR_PATH      =
 
+# When using plantuml, the PLANTUML_CFG_FILE tag can be used to specify a
+# configuration file for plantuml.
+
+PLANTUML_CFG_FILE      =
+
+# When using plantuml, the specified paths are searched for files specified by
+# the !include statement in a plantuml block.
+
+PLANTUML_INCLUDE_PATH  =
+
 # The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes
 # that will be shown in the graph. If the number of nodes in a graph becomes
 # larger than this value, doxygen will truncate the graph, which is visualized
@@ -2343,7 +2606,7 @@ MAX_DOT_GRAPH_DEPTH    = 2
 
 DOT_TRANSPARENT        = YES
 
-# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output
+# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
 # files in one run (i.e. multiple -o and -T options on the command line). This
 # makes dot run faster, but since only newer versions of dot (>1.8.10) support
 # this, this feature is disabled by default.
@@ -2355,14 +2618,18 @@ DOT_MULTI_TARGETS      = YES
 # If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page
 # explaining the meaning of the various boxes and arrows in the dot generated
 # graphs.
+# Note: This tag requires that UML_LOOK isn't set, i.e. the doxygen internal
+# graphical representation for inheritance and collaboration diagrams is used.
 # The default value is: YES.
 # This tag requires that the tag HAVE_DOT is set to YES.
 
 GENERATE_LEGEND        = YES
 
-# If the DOT_CLEANUP tag is set to YES doxygen will remove the intermediate dot
+# If the DOT_CLEANUP tag is set to YES, doxygen will remove the intermediate
 # files that are used to generate the various graphs.
+#
+# Note: This setting is not only used for dot files but also for msc temporary
+# files.
 # The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
 
 DOT_CLEANUP            = YES
diff --git a/doc/doxygen.conf.bak b/doc/doxygen.conf.bak
new file mode 100644
index 0000000000000000000000000000000000000000..852fb79d2874a0587cfafecf4973a80bcbd22b8d
--- /dev/null
+++ b/doc/doxygen.conf.bak
@@ -0,0 +1,2364 @@
+# Doxyfile 1.8.8
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project.
+#
+# All text after a double hash (##) is considered a comment and is placed in
+# front of the TAG it is preceding.
+#
+# All text after a single hash (#) is considered a comment and will be ignored.
+# The format is:
+# TAG = value [value, ...]
+# For lists, items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (\" \").
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# This tag specifies the encoding used for all characters in the config file
+# that follow. The default is UTF-8 which is also the encoding used for all text
+# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv
+# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv
+# for the list of possible encodings.
+# The default value is: UTF-8.
+
+DOXYFILE_ENCODING      = UTF-8
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by
+# double-quotes, unless you are using Doxywizard) that should identify the
+# project for which the documentation is generated. This name is used in the
+# title of most generated pages and in a few other places.
+# The default value is: My Project.
+
+PROJECT_NAME           = "WOLF"
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
+# could be handy for archiving the generated documentation or if some version
+# control system is used.
+
+PROJECT_NUMBER         =
+
+# Using the PROJECT_BRIEF tag one can provide an optional one line description
+# for a project that appears at the top of each page and should give viewer a
+# quick idea about the purpose of the project. Keep the description short.
+
+PROJECT_BRIEF          = 
+# "A lion and a tiger might be stronger, but a wolf does not perform in circus"
+# Windowed Localization Frames
+
+# With the PROJECT_LOGO tag one can specify an logo or icon that is included in
+# the documentation. The maximum height of the logo should not exceed 55 pixels
+# and the maximum width should not exceed 200 pixels. Doxygen will copy the logo
+# to the output directory.
+
+PROJECT_LOGO           = ../doc/images/wolf-120.png
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
+# into which the generated documentation will be written. If a relative path is
+# entered, it will be relative to the location where doxygen was started. If
+# left blank the current directory will be used.
+
+OUTPUT_DIRECTORY       = ../doc
+
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 4096 sub-
+# directories (in 2 levels) under the output directory of each output format and
+# will distribute the generated files over these directories. Enabling this
+# option can be useful when feeding doxygen a huge amount of source files, where
+# putting all generated files in the same directory would otherwise causes
+# performance problems for the file system.
+# The default value is: NO.
+
+CREATE_SUBDIRS         = NO
+
+# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII
+# characters to appear in the names of generated files. If set to NO, non-ASCII
+# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode
+# U+3044.
+# The default value is: NO.
+
+ALLOW_UNICODE_NAMES    = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese,
+# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States),
+# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian,
+# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages),
+# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian,
+# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian,
+# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish,
+# Ukrainian and Vietnamese.
+# The default value is: English.
+
+OUTPUT_LANGUAGE        = English
+
+# If the BRIEF_MEMBER_DESC tag is set to YES doxygen will include brief member
+# descriptions after the members that are listed in the file and class
+# documentation (similar to Javadoc). Set to NO to disable this.
+# The default value is: YES.
+
+BRIEF_MEMBER_DESC      = YES
+
+# If the REPEAT_BRIEF tag is set to YES doxygen will prepend the brief
+# description of a member or function before the detailed description
+#
+# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+# The default value is: YES.
+
+REPEAT_BRIEF           = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator that is
+# used to form the text in various listings. Each string in this list, if found
+# as the leading text of the brief description, will be stripped from the text
+# and the result, after processing the whole list, is used as the annotated
+# text. Otherwise, the brief description is used as-is. If left blank, the
+# following values are used ($name is automatically replaced with the name of
+# the entity):The $name class, The $name widget, The $name file, is, provides,
+# specifies, contains, represents, a, an and the.
+
+ABBREVIATE_BRIEF       =
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# doxygen will generate a detailed section even if there is only a brief
+# description.
+# The default value is: NO.
+
+ALWAYS_DETAILED_SEC    = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
+# operators of the base classes will not be shown.
+# The default value is: NO.
+
+INLINE_INHERITED_MEMB  = NO
+
+# If the FULL_PATH_NAMES tag is set to YES doxygen will prepend the full path
+# before files name in the file list and in the header files. If set to NO the
+# shortest path that makes the file name unique will be used
+# The default value is: YES.
+
+FULL_PATH_NAMES        = NO
+
+# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path.
+# Stripping is only done if one of the specified strings matches the left-hand
+# part of the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the path to
+# strip.
+#
+# Note that you can specify absolute paths here, but also relative paths, which
+# will be relative from the directory where doxygen is started.
+# This tag requires that the tag FULL_PATH_NAMES is set to YES.
+
+STRIP_FROM_PATH        =
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
+# path mentioned in the documentation of a class, which tells the reader which
+# header file to include in order to use a class. If left blank only the name of
+# the header file containing the class definition is used. Otherwise one should
+# specify the list of include paths that are normally passed to the compiler
+# using the -I flag.
+
+STRIP_FROM_INC_PATH    =
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
+# less readable) file names. This can be useful is your file systems doesn't
+# support long names like on DOS, Mac, or CD-ROM.
+# The default value is: NO.
+
+SHORT_NAMES            = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the
+# first line (until the first dot) of a Javadoc-style comment as the brief
+# description. If set to NO, the Javadoc-style will behave just like regular Qt-
+# style comments (thus requiring an explicit @brief command for a brief
+# description.)
+# The default value is: NO.
+
+JAVADOC_AUTOBRIEF      = NO
+
+# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
+# line (until the first dot) of a Qt-style comment as the brief description. If
+# set to NO, the Qt-style will behave just like regular Qt-style comments (thus
+# requiring an explicit \brief command for a brief description.)
+# The default value is: NO.
+
+QT_AUTOBRIEF           = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a
+# multi-line C++ special comment block (i.e. a block of //! or /// comments) as
+# a brief description. This used to be the default behavior. The new default is
+# to treat a multi-line C++ comment block as a detailed description. Set this
+# tag to YES if you prefer the old behavior instead.
+#
+# Note that setting this tag to YES also means that rational rose comments are
+# not recognized any more.
+# The default value is: NO.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
+# documentation from any documented member that it re-implements.
+# The default value is: YES.
+
+INHERIT_DOCS           = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce a
+# new page for each member. If set to NO, the documentation of a member will be
+# part of the file/class/namespace that contains it.
+# The default value is: NO.
+
+SEPARATE_MEMBER_PAGES  = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen
+# uses this value to replace tabs by spaces in code fragments.
+# Minimum value: 1, maximum value: 16, default value: 4.
+
+TAB_SIZE               = 4
+
+# This tag can be used to specify a number of aliases that act as commands in
+# the documentation. An alias has the form:
+# name=value
+# For example adding
+# "sideeffect=@par Side Effects:\n"
+# will allow you to put the command \sideeffect (or @sideeffect) in the
+# documentation, which will result in a user-defined paragraph with heading
+# "Side Effects:". You can put \n's in the value part of an alias to insert
+# newlines.
+
+ALIASES                =
+
+# This tag can be used to specify a number of word-keyword mappings (TCL only).
+# A mapping has the form "name=value". For example adding "class=itcl::class"
+# will allow you to use the command class in the itcl::class meaning.
+
+TCL_SUBST              =
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
+# only. Doxygen will then generate output that is more tailored for C. For
+# instance, some of the names that are used will be different. The list of all
+# members will be omitted, etc.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_FOR_C  = NO
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or
+# Python sources only. Doxygen will then generate output that is more tailored
+# for that language. For instance, namespaces will be presented as packages,
+# qualified scopes will look different, etc.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_JAVA   = NO
+
+# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
+# sources. Doxygen will then generate output that is tailored for Fortran.
+# The default value is: NO.
+
+OPTIMIZE_FOR_FORTRAN   = NO
+
+# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
+# sources. Doxygen will then generate output that is tailored for VHDL.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_VHDL   = NO
+
+# Doxygen selects the parser to use depending on the extension of the files it
+# parses. With this tag you can assign which parser to use for a given
+# extension. Doxygen has a built-in mapping, but you can override or extend it
+# using this tag. The format is ext=language, where ext is a file extension, and
+# language is one of the parsers supported by doxygen: IDL, Java, Javascript,
+# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran:
+# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran:
+# Fortran. In the latter case the parser tries to guess whether the code is fixed
+# or free formatted code, this is the default for Fortran type files), VHDL. For
+# instance to make doxygen treat .inc files as Fortran files (default is PHP),
+# and .f files as C (default is Fortran), use: inc=Fortran f=C.
+#
+# Note For files without extension you can use no_extension as a placeholder.
+#
+# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
+# the files are not read by doxygen.
+
+EXTENSION_MAPPING      =
+
+# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
+# according to the Markdown format, which allows for more readable
+# documentation. See http://daringfireball.net/projects/markdown/ for details.
+# The output of markdown processing is further processed by doxygen, so you can
+# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
+# case of backward compatibilities issues.
+# The default value is: YES.
+
+MARKDOWN_SUPPORT       = YES
+
+# When enabled doxygen tries to link words that correspond to documented
+# classes, or namespaces to their corresponding documentation. Such a link can
+# be prevented in individual cases by putting a % sign in front of the word
+# or globally by setting AUTOLINK_SUPPORT to NO.
+# The default value is: YES.
+
+AUTOLINK_SUPPORT       = YES
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should set this
+# tag to YES in order to let doxygen match functions declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string);
+# versus func(std::string) {}). This also makes the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+# The default value is: NO.
+
+BUILTIN_STL_SUPPORT    = NO
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
+# enable parsing support.
+# The default value is: NO.
+
+CPP_CLI_SUPPORT        = NO
+
+# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
+# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen
+# will parse them like normal C++ but will assume all classes use public instead
+# of private inheritance when no explicit protection keyword is present.
+# The default value is: NO.
+
+SIP_SUPPORT            = NO
+
+# For Microsoft's IDL there are propget and propput attributes to indicate
+# getter and setter methods for a property. Setting this option to YES will make
+# doxygen replace the get and set methods by a property in the documentation.
+# This will only work if the methods are indeed getting or setting a simple
+# type. If this is not the case, or you want to show the methods anyway, you
+# should set this option to NO.
+# The default value is: YES.
+
+IDL_PROPERTY_SUPPORT   = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES, then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+# The default value is: NO.
+
+DISTRIBUTE_GROUP_DOC   = NO
+
+# Set the SUBGROUPING tag to YES to allow class member groups of the same type
+# (for instance a group of public functions) to be put as a subgroup of that
+# type (e.g. under the Public Functions section). Set it to NO to prevent
+# subgrouping. Alternatively, this can be done per class using the
+# \nosubgrouping command.
+# The default value is: YES.
+
+SUBGROUPING            = YES
+
+# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions
+# are shown inside the group in which they are included (e.g. using \ingroup)
+# instead of on a separate page (for HTML and Man pages) or section (for LaTeX
+# and RTF).
+#
+# Note that this feature does not work in combination with
+# SEPARATE_MEMBER_PAGES.
+# The default value is: NO.
+
+INLINE_GROUPED_CLASSES = NO
+
+# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions
+# with only public data fields or simple typedef fields will be shown inline in
+# the documentation of the scope in which they are defined (i.e. file,
+# namespace, or group documentation), provided this scope is documented. If set
+# to NO, structs, classes, and unions are shown on a separate page (for HTML and
+# Man pages) or section (for LaTeX and RTF).
+# The default value is: NO.
+
+INLINE_SIMPLE_STRUCTS  = YES
+
+# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or
+# enum is documented as struct, union, or enum with the name of the typedef. So
+# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
+# with name TypeT. When disabled the typedef will appear as a member of a file,
+# namespace, or class. And the struct will be named TypeS. This can typically be
+# useful for C code in case the coding convention dictates that all compound
+# types are typedef'ed and only the typedef is referenced, never the tag name.
+# The default value is: NO.
+
+TYPEDEF_HIDES_STRUCT   = NO
+
+# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
+# cache is used to resolve symbols given their name and scope. Since this can be
+# an expensive process and often the same symbol appears multiple times in the
+# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small
+# doxygen will become slower. If the cache is too large, memory is wasted. The
+# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range
+# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536
+# symbols. At the end of a run doxygen will report the cache usage and suggest
+# the optimal cache size from a speed point of view.
+# Minimum value: 0, maximum value: 9, default value: 0.
+
+LOOKUP_CACHE_SIZE      = 0
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
+# documentation are documented, even if no documentation was available. Private
+# class members and static file members will be hidden unless the
+# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
+# Note: This will also disable the warnings about undocumented members that are
+# normally produced when WARNINGS is set to YES.
+# The default value is: NO.
+
+EXTRACT_ALL            = YES
+
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class will
+# be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PRIVATE        = NO
+
+# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal
+# scope will be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PACKAGE        = NO
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file will be
+# included in the documentation.
+# The default value is: NO.
+
+EXTRACT_STATIC         = NO
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) defined
+# locally in source files will be included in the documentation. If set to NO
+# only classes defined in header files are included. Does not have any effect
+# for Java sources.
+# The default value is: YES.
+
+EXTRACT_LOCAL_CLASSES  = YES
+
+# This flag is only useful for Objective-C code. When set to YES local methods,
+# which are defined in the implementation section but not in the interface are
+# included in the documentation. If set to NO only methods in the interface are
+# included.
+# The default value is: NO.
+
+EXTRACT_LOCAL_METHODS  = NO
+
+# If this flag is set to YES, the members of anonymous namespaces will be
+# extracted and appear in the documentation as a namespace called
+# 'anonymous_namespace{file}', where file will be replaced with the base name of
+# the file that contains the anonymous namespace. By default anonymous namespace
+# are hidden.
+# The default value is: NO.
+
+EXTRACT_ANON_NSPACES   = NO
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
+# undocumented members inside documented classes or files. If set to NO these
+# members will be included in the various overviews, but no documentation
+# section is generated. This option has no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
+
+HIDE_UNDOC_MEMBERS     = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy. If set
+# to NO these classes will be included in the various overviews. This option has
+# no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
+
+HIDE_UNDOC_CLASSES     = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
+# (class|struct|union) declarations. If set to NO these declarations will be
+# included in the documentation.
+# The default value is: NO.
+
+HIDE_FRIEND_COMPOUNDS  = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
+# documentation blocks found inside the body of a function. If set to NO these
+# blocks will be appended to the function's detailed documentation block.
+# The default value is: NO.
+
+HIDE_IN_BODY_DOCS      = NO
+
+# The INTERNAL_DOCS tag determines if documentation that is typed after a
+# \internal command is included. If the tag is set to NO then the documentation
+# will be excluded. Set it to YES to include the internal documentation.
+# The default value is: NO.
+
+INTERNAL_DOCS          = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file
+# names in lower-case letters. If set to YES upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# and Mac users are advised to set this option to NO.
+# The default value is: system dependent.
+
+CASE_SENSE_NAMES       = YES
+
+# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
+# their full class and namespace scopes in the documentation. If set to YES the
+# scope will be hidden.
+# The default value is: NO.
+
+HIDE_SCOPE_NAMES       = NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
+# the files that are included by a file in the documentation of that file.
+# The default value is: YES.
+
+SHOW_INCLUDE_FILES     = YES
+
+# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each
+# grouped member an include statement to the documentation, telling the reader
+# which file to include in order to use the member.
+# The default value is: NO.
+
+SHOW_GROUPED_MEMB_INC  = NO
+
+# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include
+# files with double quotes in the documentation rather than with sharp brackets.
+# The default value is: NO.
+
+FORCE_LOCAL_INCLUDES   = NO
+
+# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the
+# documentation for inline members.
+# The default value is: YES.
+
+INLINE_INFO            = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the
+# (detailed) documentation of file and class members alphabetically by member
+# name. If set to NO the members will appear in declaration order.
+# The default value is: YES.
+
+SORT_MEMBER_DOCS       = NO
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
+# descriptions of file, namespace and class members alphabetically by member
+# name. If set to NO the members will appear in declaration order. Note that
+# this will also influence the order of the classes in the class list.
+# The default value is: NO.
+
+SORT_BRIEF_DOCS        = NO
+
+# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the
+# (brief and detailed) documentation of class members so that constructors and
+# destructors are listed first. If set to NO the constructors will appear in the
+# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS.
+# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief
+# member documentation.
+# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting
+# detailed member documentation.
+# The default value is: NO.
+
+SORT_MEMBERS_CTORS_1ST = NO
+
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy
+# of group names into alphabetical order. If set to NO the group names will
+# appear in their defined order.
+# The default value is: NO.
+
+SORT_GROUP_NAMES       = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by
+# fully-qualified names, including namespaces. If set to NO, the class list will
+# be sorted only by class name, not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the alphabetical
+# list.
+# The default value is: NO.
+
+SORT_BY_SCOPE_NAME     = NO
+
+# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper
+# type resolution of all parameters of a function it will reject a match between
+# the prototype and the implementation of a member function even if there is
+# only one candidate or it is obvious which candidate to choose by doing a
+# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still
+# accept a match between prototype and implementation in such cases.
+# The default value is: NO.
+
+STRICT_PROTO_MATCHING  = NO
+
+# The GENERATE_TODOLIST tag can be used to enable ( YES) or disable ( NO) the
+# todo list. This list is created by putting \todo commands in the
+# documentation.
+# The default value is: YES.
+
+GENERATE_TODOLIST      = YES
+
+# The GENERATE_TESTLIST tag can be used to enable ( YES) or disable ( NO) the
+# test list. This list is created by putting \test commands in the
+# documentation.
+# The default value is: YES.
+
+GENERATE_TESTLIST      = YES
+
+# The GENERATE_BUGLIST tag can be used to enable ( YES) or disable ( NO) the bug
+# list. This list is created by putting \bug commands in the documentation.
+# The default value is: YES.
+
+GENERATE_BUGLIST       = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable ( YES) or disable ( NO)
+# the deprecated list. This list is created by putting \deprecated commands in
+# the documentation.
+# The default value is: YES.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional documentation
+# sections, marked by \if <section_label> ... \endif and \cond <section_label>
+# ... \endcond blocks.
+
+ENABLED_SECTIONS       =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
+# initial value of a variable or macro / define can have for it to appear in the
+# documentation. If the initializer consists of more lines than specified here
+# it will be hidden. Use a value of 0 to hide initializers completely. The
+# appearance of the value of individual variables and macros / defines can be
+# controlled using \showinitializer or \hideinitializer command in the
+# documentation regardless of this setting.
+# Minimum value: 0, maximum value: 10000, default value: 30.
+
+MAX_INITIALIZER_LINES  = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
+# the bottom of the documentation of classes and structs. If set to YES the list
+# will mention the files that were used to generate the documentation.
+# The default value is: YES.
+
+SHOW_USED_FILES        = YES
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
+# will remove the Files entry from the Quick Index and from the Folder Tree View
+# (if specified).
+# The default value is: YES.
+
+SHOW_FILES             = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
+# page. This will remove the Namespaces entry from the Quick Index and from the
+# Folder Tree View (if specified).
+# The default value is: YES.
+
+SHOW_NAMESPACES        = YES
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command <command> <input-file>, where <command> is the value of
+# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file provided
+# by doxygen. Whatever the program writes to standard output is used as the file
+# version. For an example see the documentation.
+
+FILE_VERSION_FILTER    =
+
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
+# by doxygen. The layout file controls the global structure of the generated
+# output files in an output format independent way. To create the layout file
+# that represents doxygen's defaults, run doxygen with the -l option. You can
+# optionally specify a file name after the option, if omitted DoxygenLayout.xml
+# will be used as the name of the layout file.
+#
+# Note that if you run doxygen from a directory containing a file called
+# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
+# tag is left empty.
+
+LAYOUT_FILE            =
+
+# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
+# the reference definitions. This must be a list of .bib files. The .bib
+# extension is automatically appended if omitted. This requires the bibtex tool
+# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info.
+# For LaTeX the style of the bibliography can be controlled using
+# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
+# search path. See also \cite for info how to create references.
+
+CITE_BIB_FILES         =
+
+#---------------------------------------------------------------------------
+# Configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated to
+# standard output by doxygen. If QUIET is set to YES this implies that the
+# messages are off.
+# The default value is: NO.
+
+QUIET                  = NO
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated to standard error ( stderr) by doxygen. If WARNINGS is set to YES
+# this implies that the warnings are on.
+#
+# Tip: Turn warnings on while writing the documentation.
+# The default value is: YES.
+
+WARNINGS               = YES
+
+# If the WARN_IF_UNDOCUMENTED tag is set to YES, then doxygen will generate
+# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
+# will automatically be disabled.
+# The default value is: YES.
+
+WARN_IF_UNDOCUMENTED   = YES
+
+# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some parameters
+# in a documented function, or documenting parameters that don't exist or using
+# markup commands wrongly.
+# The default value is: YES.
+
+WARN_IF_DOC_ERROR      = YES
+
+# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
+# are documented, but have no documentation for their parameters or return
+# value. If set to NO doxygen will only warn about wrong or incomplete parameter
+# documentation, but not about the absence of documentation.
+# The default value is: NO.
+
+WARN_NO_PARAMDOC       = YES
+
+# The WARN_FORMAT tag determines the format of the warning messages that doxygen
+# can produce. The string should contain the $file, $line, and $text tags, which
+# will be replaced by the file and line number from which the warning originated
+# and the warning text. Optionally the format may contain $version, which will
+# be replaced by the version of the file (if it could be obtained via
+# FILE_VERSION_FILTER)
+# The default value is: $file:$line: $text.
+
+WARN_FORMAT            = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning and error
+# messages should be written. If left blank the output is written to standard
+# error (stderr).
+
+WARN_LOGFILE           =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag is used to specify the files and/or directories that contain
+# documented source files. You may enter file names like myfile.cpp or
+# directories like /usr/src/myproject. Separate the files or directories with
+# spaces.
+# Note: If this tag is empty the current directory is searched.
+
+INPUT                  = ../doc/main.dox \
+                         ../src
+
+# This tag can be used to specify the character encoding of the source files
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
+# libiconv (or the iconv built into libc) for the transcoding. See the libiconv
+# documentation (see: http://www.gnu.org/software/libiconv) for the list of
+# possible encodings.
+# The default value is: UTF-8.
+
+INPUT_ENCODING         = UTF-8
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank the
+# following patterns are tested: *.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii,
+# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp,
+# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown,
+# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf,
+# *.qsf, *.as and *.js.
+
+FILE_PATTERNS          = *.c \
+                         *.h \
+                         *.cpp
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories should
+# be searched for input files as well.
+# The default value is: NO.
+
+RECURSIVE              = YES
+
+# The EXCLUDE tag can be used to specify files and/or directories that should be
+# excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+#
+# Note that relative paths are relative to the directory from which doxygen is
+# run.
+
+EXCLUDE                = ../src/old \
+                         ../src/examples
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix file system feature) are excluded
+# from the input.
+# The default value is: NO.
+
+EXCLUDE_SYMLINKS       = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories.
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories for example use the pattern */test/*
+
+EXCLUDE_PATTERNS       = parental.* \
+                         test_branch*.* \
+                         test_circ*.* \
+                         test_error* \
+                         test_graph* \
+                         test_hete*.* \
+                         test_map* \
+                         test_mi* \
+                         test_multimap* \
+                         test_node*.* \
+                         test_parent*.* \
+                         test_Sensor* \
+                         test_point* \
+                         test_quat* \
+                         test_shared*.* \
+                         test_states* \
+                         test_traj*.* \
+                         test_unor*.* \
+                         test_up_down* \
+                         test_wolf_tree* \
+                         *.tab.c \
+                         *.tab.h \
+                         lex* \
+                         *glr.h \
+                         *llr.h \
+                         *glr.c \
+                         *llr.c \
+                         *general.h
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories use the pattern */test/*
+
+EXCLUDE_SYMBOLS        =
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or directories
+# that contain example code fragments that are included (see the \include
+# command).
+
+EXAMPLE_PATH           = ../src/examples
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank all
+# files are included.
+
+EXAMPLE_PATTERNS       =
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude commands
+# irrespective of the value of the RECURSIVE tag.
+# The default value is: NO.
+
+EXAMPLE_RECURSIVE      = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or directories
+# that contain images that are to be included in the documentation (see the
+# \image command).
+
+IMAGE_PATH             = ../doc/images
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command:
+#
+# <filter> <input-file>
+#
+# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
+# name of an input file. Doxygen will then use the output that the filter
+# program writes to standard output. If FILTER_PATTERNS is specified, this tag
+# will be ignored.
+#
+# Note that the filter must not add or remove lines; it is applied before the
+# code is scanned, but not when the output code is generated. If lines are added
+# or removed, the anchors will not be placed correctly.
+
+INPUT_FILTER           =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form: pattern=filter
+# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
+# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
+# patterns match the file name, INPUT_FILTER is applied.
+
+FILTER_PATTERNS        =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER ) will also be used to filter the input files that are used for
+# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
+# The default value is: NO.
+
+FILTER_SOURCE_FILES    = NO
+
+# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
+# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and
+# it is also possible to disable source filtering for a specific pattern using
+# *.ext= (so without naming a filter).
+# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
+
+FILTER_SOURCE_PATTERNS =
+
+# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
+# is part of the input, its contents will be placed on the main page
+# (index.html). This can be useful if you have a project on for instance GitHub
+# and want to reuse the introduction page also for the doxygen output.
+
+USE_MDFILE_AS_MAINPAGE =
+
+#---------------------------------------------------------------------------
+# Configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
+# generated. Documented entities will be cross-referenced with these sources.
+#
+# Note: To get rid of all source code in the generated output, make sure that
+# also VERBATIM_HEADERS is set to NO.
+# The default value is: NO.
+
+SOURCE_BROWSER         = YES
+
+# Setting the INLINE_SOURCES tag to YES will include the body of functions,
+# classes and enums directly into the documentation.
+# The default value is: NO.
+
+INLINE_SOURCES         = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
+# special comment blocks from generated source code fragments. Normal C, C++ and
+# Fortran comments will always remain visible.
+# The default value is: YES.
+
+STRIP_CODE_COMMENTS    = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
+# function all documented functions referencing it will be listed.
+# The default value is: NO.
+
+REFERENCED_BY_RELATION = YES
+
+# If the REFERENCES_RELATION tag is set to YES then for each documented function
+# all documented entities called/used by that function will be listed.
+# The default value is: NO.
+
+REFERENCES_RELATION    = YES
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
+# to YES, then the hyperlinks from functions in REFERENCES_RELATION and
+# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
+# link to the documentation.
+# The default value is: YES.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
+# source code will show a tooltip with additional information such as prototype,
+# brief description and links to the definition and documentation. Since this
+# will make the HTML file larger and loading of large files a bit slower, you
+# can opt to disable this feature.
+# The default value is: YES.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+SOURCE_TOOLTIPS        = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code will
+# point to the HTML generated by the htags(1) tool instead of doxygen built-in
+# source browser. The htags tool is part of GNU's global source tagging system
+# (see http://www.gnu.org/software/global/global.html). You will need version
+# 4.8.6 or higher.
+#
+# To use it do the following:
+# - Install the latest version of global
+# - Enable SOURCE_BROWSER and USE_HTAGS in the config file
+# - Make sure the INPUT points to the root of the source tree
+# - Run doxygen as normal
+#
+# Doxygen will invoke htags (and that will in turn invoke gtags), so these
+# tools must be available from the command line (i.e. in the search path).
+#
+# The result: instead of the source browser generated by doxygen, the links to
+# source code will now point to the output of htags.
+# The default value is: NO.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+USE_HTAGS              = NO
+
+# If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a
+# verbatim copy of the header file for each class for which an include is
+# specified. Set to NO to disable this.
+# See also: Section \class.
+# The default value is: YES.
+
+VERBATIM_HEADERS       = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
+# compounds will be generated. Enable this if the project contains a lot of
+# classes, structs, unions or interfaces.
+# The default value is: YES.
+
+ALPHABETICAL_INDEX     = YES
+
+# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
+# which the alphabetical index list will be split.
+# Minimum value: 1, maximum value: 20, default value: 5.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+
+COLS_IN_ALPHA_INDEX    = 5
+
+# In case all classes in a project start with a common prefix, all classes will
+# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
+# can be used to specify a prefix (or a list of prefixes) that should be ignored
+# while generating the index headers.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+
+IGNORE_PREFIX          =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES doxygen will generate HTML output
+# The default value is: YES.
+
+GENERATE_HTML          = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_OUTPUT            = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
+# generated HTML page (for example: .htm, .php, .asp).
+# The default value is: .html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FILE_EXTENSION    = .html
+
+# The HTML_HEADER tag can be used to specify a user-defined HTML header file for
+# each generated HTML page. If the tag is left blank doxygen will generate a
+# standard header.
+#
+# To get valid HTML you need a header file that includes any scripts and style
+# sheets that doxygen needs, which is dependent on the configuration options
+# used (e.g. the setting GENERATE_TREEVIEW). It is highly recommended to start
+# with a
+# default header using
+# doxygen -w html new_header.html new_footer.html new_stylesheet.css
+# YourConfigFile
+# and then modify the file new_header.html. See also section "Doxygen usage"
+# for information on how to generate the default header that doxygen normally
+# uses.
+# Note: The header is subject to change so you typically have to regenerate the
+# default header when upgrading to a newer version of doxygen. For a description
+# of the possible markers and block names see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_HEADER            =
+
+# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
+# generated HTML page. If the tag is left blank doxygen will generate a standard
+# footer. See HTML_HEADER for more information on how to generate a default
+# footer and what special commands can be used inside the footer. See also
+# section "Doxygen usage" for information on how to generate the default footer
+# that doxygen normally uses.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FOOTER            =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
+# sheet that is used by each HTML page. It can be used to fine-tune the look of
+# the HTML output. If left blank doxygen will generate a default style sheet.
+# See also section "Doxygen usage" for information on how to generate the style
+# sheet that doxygen normally uses.
+# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
+# it is more robust and this tag (HTML_STYLESHEET) will in the future become
+# obsolete.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_STYLESHEET        =
+
+# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined
+# cascading style sheets that are included after the standard style sheets
+# created by doxygen. Using this option one can overrule certain style aspects.
+# This is preferred over using HTML_STYLESHEET since it does not replace the
+# standard style sheet and is therefore more robust against future updates.
+# Doxygen will copy the style sheet files to the output directory.
+# Note: The order of the extra stylesheet files is of importance (e.g. the last
+# stylesheet in the list overrules the setting of the previous ones in the
+# list). For an example see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_STYLESHEET  =
+
+# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the HTML output directory. Note
+# that these files will be copied to the base HTML output directory. Use the
+# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
+# files. In the HTML_STYLESHEET file, use the file name only. Also note that the
+# files will be copied as-is; there are no commands or markers available.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_FILES       =
+
+# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
+# will adjust the colors in the stylesheet and background images according to
+# this color. Hue is specified as an angle on a colorwheel, see
+# http://en.wikipedia.org/wiki/Hue for more information. For instance the value
+# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
+# purple, and 360 is red again.
+# Minimum value: 0, maximum value: 359, default value: 220.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_HUE    = 220
+
+# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
+# in the HTML output. For a value of 0 the output will use grayscales only. A
+# value of 255 will produce the most vivid colors.
+# Minimum value: 0, maximum value: 255, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_SAT    = 100
+
+# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the
+# luminance component of the colors in the HTML output. Values below 100
+# gradually make the output lighter, whereas values above 100 make the output
+# darker. The value divided by 100 is the actual gamma applied, so 80 represents
+# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not
+# change the gamma.
+# Minimum value: 40, maximum value: 240, default value: 80.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_GAMMA  = 80
+
+# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
+# page will contain the date and time when the page was generated. Setting this
+# to NO can help when comparing the output of multiple runs.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_TIMESTAMP         = YES
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
+# page has loaded.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_DYNAMIC_SECTIONS  = NO
+
+# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
+# shown in the various tree structured indices initially; the user can expand
+# and collapse entries dynamically later on. Doxygen will expand the tree to
+# such a level that at most the specified number of entries are visible (unless
+# a fully collapsed tree already exceeds this amount). So setting the number of
+# entries 1 will produce a full collapsed tree by default. 0 is a special value
+# representing an infinite number of entries and will result in a full expanded
+# tree by default.
+# Minimum value: 0, maximum value: 9999, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_INDEX_NUM_ENTRIES = 100
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files will be
+# generated that can be used as input for Apple's Xcode 3 integrated development
+# environment (see: http://developer.apple.com/tools/xcode/), introduced with
+# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a
+# Makefile in the HTML output directory. Running make will produce the docset in
+# that directory and running make install will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
+# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
+# for more information.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_DOCSET        = NO
+
+# This tag determines the name of the docset feed. A documentation feed provides
+# an umbrella under which multiple documentation sets from a single provider
+# (such as a company or product suite) can be grouped.
+# The default value is: Doxygen generated docs.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_FEEDNAME        = "Doxygen generated docs"
+
+# This tag specifies a string that should uniquely identify the documentation
+# set bundle. This should be a reverse domain-name style string, e.g.
+# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_BUNDLE_ID       = org.doxygen.Project
+
+# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
+# the documentation publisher. This should be a reverse domain-name style
+# string, e.g. com.mycompany.MyDocSet.documentation.
+# The default value is: org.doxygen.Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_ID    = org.doxygen.Publisher
+
+# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
+# The default value is: Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_NAME  = Publisher
+
+# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
+# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
+# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
+# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on
+# Windows.
+#
+# The HTML Help Workshop contains a compiler that can convert all HTML output
+# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
+# files are now used as the Windows 98 help format, and will replace the old
+# Windows help format (.hlp) on all Windows platforms in the future. Compressed
+# HTML files also contain an index, a table of contents, and you can search for
+# words in the documentation. The HTML workshop also contains a viewer for
+# compressed HTML files.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_HTMLHELP      = NO
+
+# The CHM_FILE tag can be used to specify the file name of the resulting .chm
+# file. You can add a path in front of the file if the result should not be
+# written to the html output directory.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+CHM_FILE               =
+
+# The HHC_LOCATION tag can be used to specify the location (absolute path
+# including file name) of the HTML help compiler ( hhc.exe). If non-empty
+# doxygen will try to run the HTML help compiler on the generated index.hhp.
+# The file has to be specified with full path.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+HHC_LOCATION           =
+
+# The GENERATE_CHI flag controls if a separate .chi index file is generated (
+# YES) or that it should be included in the master .chm file ( NO).
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+GENERATE_CHI           = NO
+
+# The CHM_INDEX_ENCODING is used to encode HtmlHelp index ( hhk), content ( hhc)
+# and project file content.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+CHM_INDEX_ENCODING     =
+
+# The BINARY_TOC flag controls whether a binary table of contents is generated (
+# YES) or a normal table of contents ( NO) in the .chm file. Furthermore it
+# enables the Previous and Next buttons.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+BINARY_TOC             = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members to
+# the table of contents of the HTML help documentation and to the tree view.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+TOC_EXPAND             = NO
+
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
+# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
+# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
+# (.qch) of the generated HTML documentation.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_QHP           = NO
+
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify
+# the file name of the resulting .qch file. The path specified is relative to
+# the HTML output folder.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QCH_FILE               =
+
+# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
+# Project output. For more information please see Qt Help Project / Namespace
+# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace).
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_NAMESPACE          = org.doxygen.Project
+
+# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
+# Help Project output. For more information please see Qt Help Project / Virtual
+# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual-
+# folders).
+# The default value is: doc.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_VIRTUAL_FOLDER     = doc
+
+# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
+# filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_CUST_FILTER_NAME   =
+
+# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
+# custom filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_CUST_FILTER_ATTRS  =
+
+# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
+# project's filter section matches. Qt Help Project / Filter Attributes (see:
+# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_SECT_FILTER_ATTRS  =
+
+# The QHG_LOCATION tag can be used to specify the location of Qt's
+# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the
+# generated .qhp file.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHG_LOCATION           =
+
+# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
+# generated, together with the HTML files, they form an Eclipse help plugin. To
+# install this plugin and make it available under the help contents menu in
+# Eclipse, the contents of the directory containing the HTML and XML files needs
+# to be copied into the plugins directory of eclipse. The name of the directory
+# within the plugins directory should be the same as the ECLIPSE_DOC_ID value.
+# After copying Eclipse needs to be restarted before the help appears.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_ECLIPSEHELP   = NO
+
+# A unique identifier for the Eclipse help plugin. When installing the plugin
+# the directory name containing the HTML and XML files should also have this
+# name. Each documentation set should have its own identifier.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.
+
+ECLIPSE_DOC_ID         = org.doxygen.Project
+
+# If you want full control over the layout of the generated HTML pages it might
+# be necessary to disable the index and replace it with your own. The
+# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top
+# of each HTML page. A value of NO enables the index and the value YES disables
+# it. Since the tabs in the index contain the same information as the navigation
+# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+DISABLE_INDEX          = NO
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information. If the tag
+# value is set to YES, a side panel will be generated containing a tree-like
+# index structure (just like the one that is generated for HTML Help). For this
+# to work a browser that supports JavaScript, DHTML, CSS and frames is required
+# (i.e. any modern browser). Windows users are probably better off using the
+# HTML help feature. Via custom stylesheets (see HTML_EXTRA_STYLESHEET) one can
+# further fine-tune the look of the index. As an example, the default style
+# sheet generated by doxygen has an example that shows how to put an image at
+# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
+# the same information as the tab index, you could consider setting
+# DISABLE_INDEX to YES when enabling this option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_TREEVIEW      = NO
+
+# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
+# doxygen will group on one line in the generated HTML documentation.
+#
+# Note that a value of 0 will completely suppress the enum values from appearing
+# in the overview section.
+# Minimum value: 0, maximum value: 20, default value: 4.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+ENUM_VALUES_PER_LINE   = 4
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
+# to set the initial width (in pixels) of the frame in which the tree is shown.
+# Minimum value: 0, maximum value: 1500, default value: 250.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+TREEVIEW_WIDTH         = 250
+
+# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open links to
+# external symbols imported via tag files in a separate window.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+EXT_LINKS_IN_WINDOW    = NO
+
+# Use this tag to change the font size of LaTeX formulas included as images in
+# the HTML documentation. When you change the font size after a successful
+# doxygen run you need to manually remove any form_*.png images from the HTML
+# output directory to force them to be regenerated.
+# Minimum value: 8, maximum value: 50, default value: 10.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_FONTSIZE       = 10
+
+# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
+# generated for formulas are transparent PNGs. Transparent PNGs are not
+# supported properly for IE 6.0, but are supported on all modern browsers.
+#
+# Note that when changing this option you need to delete any form_*.png files in
+# the HTML output directory before the changes have effect.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_TRANSPARENT    = YES
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
+# http://www.mathjax.org) which uses client side Javascript for the rendering
+# instead of using prerendered bitmaps. Use this if you do not have LaTeX
+# installed or if you want the formulas to look prettier in the HTML output. When
+# enabled you may also need to install MathJax separately and configure the path
+# to it using the MATHJAX_RELPATH option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+USE_MATHJAX            = NO
+
+# When MathJax is enabled you can set the default output format to be used for
+# the MathJax output. See the MathJax site (see:
+# http://docs.mathjax.org/en/latest/output.html) for more details.
+# Possible values are: HTML-CSS (which is slower, but has the best
+# compatibility), NativeMML (i.e. MathML) and SVG.
+# The default value is: HTML-CSS.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_FORMAT         = HTML-CSS
+
+# When MathJax is enabled you need to specify the location relative to the HTML
+# output directory using the MATHJAX_RELPATH option. The destination directory
+# should contain the MathJax.js script. For instance, if the mathjax directory
+# is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
+# Content Delivery Network so you can quickly see the result without installing
+# MathJax. However, it is strongly recommended to install a local copy of
+# MathJax from http://www.mathjax.org before deployment.
+# The default value is: http://cdn.mathjax.org/mathjax/latest.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_RELPATH        = http://cdn.mathjax.org/mathjax/latest
+
+# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
+# extension names that should be enabled during MathJax rendering. For example
+# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_EXTENSIONS     =
+
+# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
+# of code that will be used on startup of the MathJax code. See the MathJax site
+# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
+# example see the documentation.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_CODEFILE       =
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
+# the HTML output. The underlying search engine uses javascript and DHTML and
+# should work on any modern browser. Note that when using HTML help
+# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
+# there is already a search function so this one should typically be disabled.
+# For large projects the javascript based search engine can be slow, then
+# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
+# search using the keyboard; to jump to the search box use <access key> + S
+# (what the <access key> is depends on the OS and browser, but it is typically
+# <CTRL>, <ALT>/<option>, or both). Inside the search box use the <cursor down
+# key> to jump into the search results window, the results can be navigated
+# using the <cursor keys>. Press <Enter> to select an item or <escape> to cancel
+# the search. The filter options can be selected when the cursor is inside the
+# search box by pressing <Shift>+<cursor down>. Also here use the <cursor keys>
+# to select a filter and <Enter> or <escape> to activate or cancel the filter
+# option.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+SEARCHENGINE           = YES
+
+# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
+# implemented using a web server instead of a web client using Javascript. There
+# are two flavors of web server based searching depending on the EXTERNAL_SEARCH
+# setting. When disabled, doxygen will generate a PHP script for searching and
+# an index file used by the script. When EXTERNAL_SEARCH is enabled the indexing
+# and searching needs to be provided by external tools. See the section
+# "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SERVER_BASED_SEARCH    = NO
+
+# When EXTERNAL_SEARCH tag is enabled doxygen will no longer generate the PHP
+# script for searching. Instead the search results are written to an XML file
+# which needs to be processed by an external indexer. Doxygen will invoke an
+# external search engine pointed to by the SEARCHENGINE_URL option to obtain the
+# search results.
+#
+# Doxygen ships with an example indexer ( doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see: http://xapian.org/).
+#
+# See the section "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH        = NO
+
+# The SEARCHENGINE_URL should point to a search engine hosted by a web server
+# which will return the search results when EXTERNAL_SEARCH is enabled.
+#
+# Doxygen ships with an example indexer ( doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see: http://xapian.org/). See the section "External Indexing and
+# Searching" for details.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHENGINE_URL       =
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
+# search data is written to a file for indexing by an external tool. With the
+# SEARCHDATA_FILE tag the name of this file can be specified.
+# The default file is: searchdata.xml.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHDATA_FILE        = searchdata.xml
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the
+# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
+# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
+# projects and redirect the results back to the right project.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH_ID     =
+
+# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
+# projects other than the one defined by this configuration file, but that are
+# all added to the same external search index. Each project needs to have a
+# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id
+# to a relative location where the documentation can be found. The format is:
+# EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ...
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTRA_SEARCH_MAPPINGS  =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES doxygen will generate LaTeX output.
+# The default value is: YES.
+
+GENERATE_LATEX         = NO
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_OUTPUT           = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked.
+#
+# Note that when enabling USE_PDFLATEX this option is only used for generating
+# bitmaps for formulas in the HTML output, but not in the Makefile that is
+# written to the output directory.
+# The default file is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_CMD_NAME         = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate
+# index for LaTeX.
+# The default file is: makeindex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+MAKEINDEX_CMD_NAME     = makeindex
+
+# If the COMPACT_LATEX tag is set to YES doxygen generates more compact LaTeX
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+COMPACT_LATEX          = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used by the
+# printer.
+# Possible values are: a4 (210 x 297 mm), letter (8.5 x 11 inches), legal (8.5 x
+# 14 inches) and executive (7.25 x 10.5 inches).
+# The default value is: a4.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+PAPER_TYPE             = a4
+
+# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
+# that should be included in the LaTeX output. To get the times font for
+# instance you can specify
+# EXTRA_PACKAGES=times
+# If left blank no extra packages will be included.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+EXTRA_PACKAGES         =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
+# generated LaTeX document. The header should contain everything until the first
+# chapter. If it is left blank doxygen will generate a standard header. See
+# section "Doxygen usage" for information on how to let doxygen write the
+# default header to a separate file.
+#
+# Note: Only use a user-defined header if you know what you are doing! The
+# following commands have a special meaning inside the header: $title,
+# $datetime, $date, $doxygenversion, $projectname, $projectnumber,
+# $projectbrief, $projectlogo. Doxygen will replace $title with the empty string,
+# for the replacement values of the other commands the user is referred to
+# HTML_HEADER.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_HEADER           =
+
+# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the
+# generated LaTeX document. The footer should contain everything after the last
+# chapter. If it is left blank doxygen will generate a standard footer. See
+# LATEX_HEADER for more information on how to generate a default footer and what
+# special commands can be used inside the footer.
+#
+# Note: Only use a user-defined footer if you know what you are doing!
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_FOOTER           =
+
+# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the LATEX_OUTPUT output
+# directory. Note that the files will be copied as-is; there are no commands or
+# markers available.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_EXTRA_FILES      =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is
+# prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will
+# contain links (just like the HTML output) instead of page references. This
+# makes the output suitable for online browsing using a PDF viewer.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+PDF_HYPERLINKS         = YES
+
+# If the USE_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate
+# the PDF file directly from the LaTeX files. Set this option to YES to get a
+# higher quality PDF documentation.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+USE_PDFLATEX           = NO
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep running
+# if errors occur, instead of asking the user for help. This option is also used
+# when generating formulas in HTML.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_BATCHMODE        = NO
+
+# If the LATEX_HIDE_INDICES tag is set to YES then doxygen will not include the
+# index chapters (such as File Index, Compound Index, etc.) in the output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_HIDE_INDICES     = NO
+
+# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source
+# code with syntax highlighting in the LaTeX output.
+#
+# Note that which sources are shown also depends on other settings such as
+# SOURCE_BROWSER.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_SOURCE_CODE      = NO
+
+# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
+# bibliography, e.g. plainnat, or ieeetr. See
+# http://en.wikipedia.org/wiki/BibTeX and \cite for more info.
+# The default value is: plain.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_BIB_STYLE        = plain
+
+#---------------------------------------------------------------------------
+# Configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES doxygen will generate RTF output. The
+# RTF output is optimized for Word 97 and may not look too pretty with other RTF
+# readers/editors.
+# The default value is: NO.
+
+GENERATE_RTF           = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: rtf.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_OUTPUT             = rtf
+
+# If the COMPACT_RTF tag is set to YES doxygen generates more compact RTF
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+COMPACT_RTF            = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated will
+# contain hyperlink fields. The RTF file will contain links (just like the HTML
+# output) instead of page references. This makes the output suitable for online
+# browsing using Word or some other Word compatible readers that support those
+# fields.
+#
+# Note: WordPad (write) and others do not support links.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_HYPERLINKS         = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's config
+# file, i.e. a series of assignments. You only have to provide replacements,
+# missing definitions are set to their default value.
+#
+# See also section "Doxygen usage" for information on how to generate the
+# default style sheet that doxygen normally uses.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_STYLESHEET_FILE    =
+
+# Set optional variables used in the generation of an RTF document. Syntax is
+# similar to doxygen's config file. A template extensions file can be generated
+# using doxygen -e rtf extensionFile.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_EXTENSIONS_FILE    =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES doxygen will generate man pages for
+# classes and files.
+# The default value is: NO.
+
+GENERATE_MAN           = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it. A directory man3 will be created inside the directory specified by
+# MAN_OUTPUT.
+# The default directory is: man.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_OUTPUT             = man
+
+# The MAN_EXTENSION tag determines the extension that is added to the generated
+# man pages. In case the manual section does not start with a number, the number
+# 3 is prepended. The dot (.) at the beginning of the MAN_EXTENSION tag is
+# optional.
+# The default value is: .3.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_EXTENSION          = .3
+
+# The MAN_SUBDIR tag determines the name of the directory created within
+# MAN_OUTPUT in which the man pages are placed. It defaults to man followed by
+# MAN_EXTENSION with the initial . removed.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_SUBDIR             =
+
+# If the MAN_LINKS tag is set to YES and doxygen generates man output, then it
+# will generate one additional man file for each entity documented in the real
+# man page(s). These additional files only source the real man page, but without
+# them the man command would be unable to find the correct page.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_LINKS              = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES doxygen will generate an XML file that
+# captures the structure of the code including all documentation.
+# The default value is: NO.
+
+GENERATE_XML           = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: xml.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_OUTPUT             = xml
+
+# If the XML_PROGRAMLISTING tag is set to YES doxygen will dump the program
+# listings (including syntax highlighting and cross-referencing information) to
+# the XML output. Note that enabling this will significantly increase the size
+# of the XML output.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_PROGRAMLISTING     = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to the DOCBOOK output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_DOCBOOK tag is set to YES doxygen will generate Docbook files
+# that can be used to generate PDF.
+# The default value is: NO.
+
+GENERATE_DOCBOOK       = NO
+
+# The DOCBOOK_OUTPUT tag is used to specify where the Docbook pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in
+# front of it.
+# The default directory is: docbook.
+# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
+
+DOCBOOK_OUTPUT         = docbook
+
+# If the DOCBOOK_PROGRAMLISTING tag is set to YES doxygen will include the
+# program listings (including syntax highlighting and cross-referencing
+# information) to the DOCBOOK output. Note that enabling this will significantly
+# increase the size of the DOCBOOK output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
+
+DOCBOOK_PROGRAMLISTING = NO
+
+#---------------------------------------------------------------------------
+# Configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES doxygen will generate an AutoGen
+# Definitions (see http://autogen.sf.net) file that captures the structure of
+# the code including all documentation. Note that this feature is still
+# experimental and incomplete at the moment.
+# The default value is: NO.
+
+GENERATE_AUTOGEN_DEF   = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES doxygen will generate a Perl module
+# file that captures the structure of the code including all documentation.
+#
+# Note that this feature is still experimental and incomplete at the moment.
+# The default value is: NO.
+
+GENERATE_PERLMOD       = NO
+
+# If the PERLMOD_LATEX tag is set to YES doxygen will generate the necessary
+# Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI
+# output from the Perl module output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_LATEX          = NO
+
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be nicely
+# formatted so it can be parsed by a human reader. This is useful if you want to
+# understand what is going on. On the other hand, if this tag is set to NO the
+# size of the Perl module output will be much smaller and Perl will parse it
+# just the same.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_PRETTY         = YES
+
+# The names of the make variables in the generated doxyrules.make file are
+# prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. This is useful
+# so different doxyrules.make files included by the same Makefile don't
+# overwrite each other's variables.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES doxygen will evaluate all
+# C-preprocessor directives found in the sources and include files.
+# The default value is: YES.
+
+ENABLE_PREPROCESSING   = YES
+
+# If the MACRO_EXPANSION tag is set to YES doxygen will expand all macro names
+# in the source code. If set to NO only conditional compilation will be
+# performed. Macro expansion can be done in a controlled way by setting
+# EXPAND_ONLY_PREDEF to YES.
+# The default value is: NO.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+MACRO_EXPANSION        = NO
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then
+# the macro expansion is limited to the macros specified with the PREDEFINED and
+# EXPAND_AS_DEFINED tags.
+# The default value is: NO.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+EXPAND_ONLY_PREDEF     = NO
+
+# If the SEARCH_INCLUDES tag is set to YES the includes files in the
+# INCLUDE_PATH will be searched if a #include is found.
+# The default value is: YES.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+SEARCH_INCLUDES        = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by the
+# preprocessor.
+# This tag requires that the tag SEARCH_INCLUDES is set to YES.
+
+INCLUDE_PATH           =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will be
+# used.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+INCLUDE_FILE_PATTERNS  =
+
+# The PREDEFINED tag can be used to specify one or more macro names that are
+# defined before the preprocessor is started (similar to the -D option of e.g.
+# gcc). The argument of the tag is a list of macros of the form: name or
+# name=definition (no spaces). If the definition and the "=" are omitted, "=1"
+# is assumed. To prevent a macro definition from being undefined via #undef or
+# recursively expanded use the := operator instead of the = operator.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+PREDEFINED             = _USE_MPI=1
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
+# tag can be used to specify a list of macro names that should be expanded. The
+# macro definition that is found in the sources will be used. Use the PREDEFINED
+# tag if you want to use a different macro definition that overrules the
+# definition found in the source code.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+EXPAND_AS_DEFINED      =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will
+# remove all references to function-like macros that are alone on a line, have
+# an all uppercase name, and do not end with a semicolon. Such function macros
+# are typically used for boiler-plate code, and will confuse the parser if not
+# removed.
+# The default value is: YES.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+SKIP_FUNCTION_MACROS   = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES tag can be used to specify one or more tag files. For each tag
+# file the location of the external documentation should be added. The format of
+# a tag file without this location is as follows:
+# TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where loc1 and loc2 can be relative or absolute paths or URLs. See the
+# section "Linking to external documentation" for more information about the use
+# of tag files.
+# Note: Each tag file must have a unique name (where the name does NOT include
+# the path). If a tag file is not located in the directory in which doxygen is
+# run, you must also specify the path to the tagfile here.
+
+TAGFILES               =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create a
+# tag file that is based on the input files it reads. See section "Linking to
+# external documentation" for more information about the usage of tag files.
+
+GENERATE_TAGFILE       =
+
+# If the ALLEXTERNALS tag is set to YES all external class will be listed in the
+# class index. If set to NO only the inherited external classes will be listed.
+# The default value is: NO.
+
+ALLEXTERNALS           = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed in
+# the modules index. If set to NO, only the current project's groups will be
+# listed.
+# The default value is: YES.
+
+EXTERNAL_GROUPS        = YES
+
+# If the EXTERNAL_PAGES tag is set to YES all external pages will be listed in
+# the related pages index. If set to NO, only the current project's pages will
+# be listed.
+# The default value is: YES.
+
+EXTERNAL_PAGES         = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of 'which perl').
+# The default file (with absolute path) is: /usr/bin/perl.
+
+PERL_PATH              = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES doxygen will generate a class diagram
+# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to
+# NO turns the diagrams off. Note that this option also works with HAVE_DOT
+# disabled, but it is recommended to install and use dot, since it yields more
+# powerful graphs.
+# The default value is: YES.
+
+CLASS_DIAGRAMS         = YES
+
+# You can define message sequence charts within doxygen comments using the \msc
+# command. Doxygen will then run the mscgen tool (see:
+# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
+# documentation. The MSCGEN_PATH tag allows you to specify the directory where
+# the mscgen tool resides. If left empty the tool is assumed to be found in the
+# default search path.
+
+MSCGEN_PATH            =
+
+# You can include diagrams made with dia in doxygen documentation. Doxygen will
+# then run dia to produce the diagram and insert it in the documentation. The
+# DIA_PATH tag allows you to specify the directory where the dia binary resides.
+# If left empty dia is assumed to be found in the default search path.
+
+DIA_PATH               =
+
+# If set to YES, the inheritance and collaboration graphs will hide inheritance
+# and usage relations if the target is undocumented or is not a class.
+# The default value is: YES.
+
+HIDE_UNDOC_RELATIONS   = NO
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz (see:
+# http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent
+# Bell Labs. The other options in this section have no effect if this option is
+# set to NO
+# The default value is: NO.
+
+HAVE_DOT               = YES
+
+# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is allowed
+# to run in parallel. When set to 0 doxygen will base this on the number of
+# processors available in the system. You can set it explicitly to a value
+# larger than 0 to get control over the balance between CPU load and processing
+# speed.
+# Minimum value: 0, maximum value: 32, default value: 0.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_NUM_THREADS        = 0
+
+# When you want a differently looking font in the dot files that doxygen
+# generates you can specify the font name using DOT_FONTNAME. You need to make
+# sure dot is able to find the font, which can be done by putting it in a
+# standard location or by setting the DOTFONTPATH environment variable or by
+# setting DOT_FONTPATH to the directory containing the font.
+# The default value is: Helvetica.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTNAME           = Helvetica
+
+# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of
+# dot graphs.
+# Minimum value: 4, maximum value: 24, default value: 10.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTSIZE           = 10
+
+# By default doxygen will tell dot to use the default font as specified with
+# DOT_FONTNAME. If you specify a different font using DOT_FONTNAME you can set
+# the path where dot can find it using this tag.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTPATH           =
+
+# If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for
+# each documented class showing the direct and indirect inheritance relations.
+# Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CLASS_GRAPH            = YES
+
+# If the COLLABORATION_GRAPH tag is set to YES then doxygen will generate a
+# graph for each documented class showing the direct and indirect implementation
+# dependencies (inheritance, containment, and class references variables) of the
+# class with other documented classes.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+COLLABORATION_GRAPH    = YES
+
+# If the GROUP_GRAPHS tag is set to YES then doxygen will generate a graph for
+# groups, showing the direct groups dependencies.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GROUP_GRAPHS           = YES
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+UML_LOOK               = NO
+
+# If the UML_LOOK tag is enabled, the fields and methods are shown inside the
+# class node. If there are many fields or methods and many nodes the graph may
+# become too big to be useful. The UML_LIMIT_NUM_FIELDS threshold limits the
+# number of items for each type to make the size more manageable. Set this to 0
+# for no limit. Note that the threshold may be exceeded by 50% before the limit
+# is enforced. So when you set the threshold to 10, up to 15 fields may appear,
+# but if the number exceeds 15, the total amount of fields shown is limited to
+# 10.
+# Minimum value: 0, maximum value: 100, default value: 10.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+UML_LIMIT_NUM_FIELDS   = 10
+
+# If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and
+# collaboration graphs will show the relations between templates and their
+# instances.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+TEMPLATE_RELATIONS     = NO
+
+# If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to
+# YES then doxygen will generate a graph for each documented file showing the
+# direct and indirect include dependencies of the file with other documented
+# files.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INCLUDE_GRAPH          = YES
+
+# If the INCLUDED_BY_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are
+# set to YES then doxygen will generate a graph for each documented file showing
+# the direct and indirect include dependencies of the file with other documented
+# files.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INCLUDED_BY_GRAPH      = YES
+
+# If the CALL_GRAPH tag is set to YES then doxygen will generate a call
+# dependency graph for every global function or class method.
+#
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable call graphs for selected
+# functions only using the \callgraph command.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CALL_GRAPH             = NO
+
+# If the CALLER_GRAPH tag is set to YES then doxygen will generate a caller
+# dependency graph for every global function or class method.
+#
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable caller graphs for selected
+# functions only using the \callergraph command.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CALLER_GRAPH           = NO
+
+# If the GRAPHICAL_HIERARCHY tag is set to YES then doxygen will show a
+# graphical hierarchy of all classes instead of a textual one.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GRAPHICAL_HIERARCHY    = YES
+
+# If the DIRECTORY_GRAPH tag is set to YES then doxygen will show the
+# dependencies a directory has on other directories in a graphical way. The
+# dependency relations are determined by the #include relations between the
+# files in the directories.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DIRECTORY_GRAPH        = NO
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot.
+# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
+# to make the SVG files visible in IE 9+ (other browsers do not have this
+# requirement).
+# Possible values are: png, jpg, gif and svg.
+# The default value is: png.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_IMAGE_FORMAT       = png
+
+# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
+# enable generation of interactive SVG images that allow zooming and panning.
+#
+# Note that this requires a modern browser other than Internet Explorer. Tested
+# and working are Firefox, Chrome, Safari, and Opera.
+# Note: For IE 9+ you need to set HTML_FILE_EXTENSION to xhtml in order to make
+# the SVG files visible. Older versions of IE do not have SVG support.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INTERACTIVE_SVG        = NO
+
+# The DOT_PATH tag can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_PATH               =
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the \dotfile
+# command).
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOTFILE_DIRS           =
+
+# The MSCFILE_DIRS tag can be used to specify one or more directories that
+# contain msc files that are included in the documentation (see the \mscfile
+# command).
+
+MSCFILE_DIRS           =
+
+# The DIAFILE_DIRS tag can be used to specify one or more directories that
+# contain dia files that are included in the documentation (see the \diafile
+# command).
+
+DIAFILE_DIRS           =
+
+# When using plantuml, the PLANTUML_JAR_PATH tag should be used to specify the
+# path where java can find the plantuml.jar file. If left blank, it is assumed
+# PlantUML is not used or called during a preprocessing step. Doxygen will
+# generate a warning when it encounters a \startuml command in this case and
+# will not generate output for the diagram.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+PLANTUML_JAR_PATH      =
+
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes
+# that will be shown in the graph. If the number of nodes in a graph becomes
+# larger than this value, doxygen will truncate the graph, which is visualized
+# by representing a node as a red box. Note that if the number of direct
+# children of the root node in a graph is already larger than
+# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note that
+# the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+# Minimum value: 0, maximum value: 10000, default value: 50.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_GRAPH_MAX_NODES    = 20
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the graphs
+# generated by dot. A depth value of 3 means that only nodes reachable from the
+# root by following a path via at most 3 edges will be shown. Nodes that lay
+# further from the root node will be omitted. Note that setting this option to 1
+# or 2 may greatly reduce the computation time needed for large code bases. Also
+# note that the size of a graph can be further restricted by
+# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
+# Minimum value: 0, maximum value: 1000, default value: 0.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+MAX_DOT_GRAPH_DEPTH    = 2
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, because dot on Windows does not seem
+# to support this out of the box.
+#
+# Warning: Depending on the platform used, enabling this option may lead to
+# badly anti-aliased labels on the edges of a graph (i.e. they become hard to
+# read).
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_TRANSPARENT        = YES
+
+# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10) support
+# this, this feature is disabled by default.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_MULTI_TARGETS      = YES
+
+# If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page
+# explaining the meaning of the various boxes and arrows in the dot generated
+# graphs.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GENERATE_LEGEND        = YES
+
+# If the DOT_CLEANUP tag is set to YES doxygen will remove the intermediate dot
+# files that are used to generate the various graphs.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_CLEANUP            = YES
diff --git a/doc/images/AddKF.png b/doc/images/AddKF.png
deleted file mode 100644
index 7d8c324f8c1290bc5ea591460c43310985d949c1..0000000000000000000000000000000000000000
Binary files a/doc/images/AddKF.png and /dev/null differ
diff --git a/doc/images/KFWindow.png b/doc/images/KFWindow.png
deleted file mode 100644
index 09bf35045b9ef55f91047fc74a5836bff6f256b9..0000000000000000000000000000000000000000
Binary files a/doc/images/KFWindow.png and /dev/null differ
diff --git a/doc/images/StateInheritances.png b/doc/images/StateInheritances.png
deleted file mode 100644
index 796304a64e38deb3156baee7782144377b8bdd62..0000000000000000000000000000000000000000
Binary files a/doc/images/StateInheritances.png and /dev/null differ
diff --git a/doc/images/TrajectoryStructure.png b/doc/images/TrajectoryStructure.png
deleted file mode 100644
index cdc23004d9ea06f7c29871c23083937ac91b1c73..0000000000000000000000000000000000000000
Binary files a/doc/images/TrajectoryStructure.png and /dev/null differ
diff --git a/doc/images/red-wolf.png b/doc/images/red-wolf.png
deleted file mode 100644
index 23e65f8f095ec0fca270954ec9015a3eb8b91e30..0000000000000000000000000000000000000000
Binary files a/doc/images/red-wolf.png and /dev/null differ
diff --git a/doc/interpolate.rtf b/doc/interpolate.rtf
deleted file mode 100644
index f64c7d77940675ec18b62d6428fe42eaaa376c58..0000000000000000000000000000000000000000
--- a/doc/interpolate.rtf
+++ /dev/null
@@ -1,263 +0,0 @@
-{\rtf1\ansi\ansicpg1252\cocoartf1404\cocoasubrtf470
-{\fonttbl\f0\fnil\fcharset0 Monaco;}
-{\colortbl;\red255\green255\blue255;\red63\green127\blue95;\red127\green159\blue191;}
-\paperw11900\paperh16840\margl1440\margr1440\vieww10800\viewh8400\viewkind0
-\deftab720
-\pard\pardeftab720\partightenfactor0
-
-\f0\fs24 \cf0         \cf2 /** \\brief Interpolate motion to an intermediate time-stamp\cf0 \
-\cf2          *\cf0 \
-\cf2          * @\ul param\ulnone  _ref    The motion reference\cf0 \
-\cf2          * @\ul param\ulnone  _second The second motion. It is modified by the function (see documentation below).\cf0 \
-\cf2          * @\ul param\ulnone  _ts     The intermediate time stamp: it must be bounded by  `_ref.ts_ <= _ts <= _second.ts_`.\cf0 \
-\cf2          * @return        The interpolated motion (see documentation below).\cf0 \
-\cf2          *\cf0 \
-\cf2          * This function interpolates a motion between two existing motions.\cf0 \
-\cf2          *\cf0 \
-\cf2          * In particular, given a reference motion `R=_ref` at time `t_R`,\cf0 \
-\cf2          * a final motion `F=_second` at time `t_F`, and an interpolation time `t_I=_ts`,\cf0 \
-\cf2          * we search for the two interpolate motions `I` and `S` such that:\cf0 \
-\cf2          *\cf0 \
-\cf2          *   - `I` is the motion between `t_R` and `t_I`\cf0 \
-\cf2          *   - `S` is the motion between `t_I` and `t_F`\cf0 \
-\cf2          *\cf0 \
-\cf2          * ### Rationale\cf0 \
-\cf2          *\cf0 \
-\cf2          * Let us name\cf0 \
-\cf2          *\cf0 \
-\cf2          *\cf0 \
-\cf2          *     R = _ref      // initial motion where interpolation starts\cf0 \
-\cf2          *     F = _second   // final motion where interpolation ends\cf0 \
-\cf2          *\cf0 \
-\cf2          * and let us define\cf0 \
-\cf2          *\cf0 \
-\cf2          *\cf0 \
-\cf2          *     t_R            // \ul timestamp\ulnone  at R\cf0 \
-\cf2          *     t_F            // \ul timestamp\ulnone  at F\cf0 \
-\cf2          *     t_I = _ts      // time stamp where interpolation is queried.\cf0 \
-\cf2          *\cf0 \
-\cf2          * We can introduce the results of the interpolation as\cf0 \
-\cf2          *\cf0 \
-\cf2          *\cf0 \
-\cf2          *     I = motion_interpolated // from t_R to t_I\cf0 \
-\cf2          *     S = motion_second       // from t_I to t_F\cf0 \
-\cf2          *\cf0 \
-\cf2          * The Motion structure in wolf has the following members (among others; see below):\cf0 \
-\cf2          *\cf0 \
-\cf2          *\cf0 \
-\cf2          *     ts_           // time stamp\cf0 \
-\cf2          *     delta_        // relative motion between the previous motion and this one. It might be seen as a local motion.\cf0 \
-\cf2          *     delta_integr_ // integration of relative deltas, since some origin. It might be seen as a globally defined motion.\cf0 \
-\cf2          *\cf0 \
-\cf2          * In this documentation, we differentiate these deltas with lower-case d and upper-case D:\cf0 \
-\cf2          *\cf0 \
-\cf2          *\cf0 \
-\cf2          *     d = any_motion.delta_            // local delta, from previous to this\cf0 \
-\cf2          *     D = any_motion.delta_integr_     // global Delta, from origin to this\cf0 \
-\cf2          *\cf0 \
-\cf2          * so that `D_(i+1) = D_(i) (+) d_(i+1)`, where (i) is in \{R, I, S\} and (i+1) is in \{I, S, F\}\cf0 \
-\cf2          *\cf0 \
-\cf2          * NOTE: the operator (+) is implemented as `deltaPlusDelta()` in each class deriving from this.\cf0 \
-\cf2          *\cf0 \
-\cf2          * This is a schematic sketch of the situation (see more explanations below),\cf0 \
-\cf2          * before and after calling `interpolate()`:\cf0 \
-\cf2          *\cf0 \
-\cf2          *\cf0 \
-\cf2          *     BEFORE             _ref         _ts       _second        variable names\cf0 \
-\cf2          *        ------+-----------+-----------+-----------+----->     time scale\cf0 \
-\cf2          *            origin        R                       F           motion short names\cf0 \
-\cf2          *           t_origin      t_R         t_I         t_F          time stamps\cf0 \
-\cf2          *                          0          tau          1           \ul interp\ulnone . factor\cf0 \
-\cf2          *              +----D_R----+----------d_F----------+           D_R (+) d_F\cf0 \
-\cf2          *              +----------------D_F----------------+           D_F = D_R (+) d_F\cf0 \
-\cf2          *\cf0 \
-\cf2          *     AFTER              _ref        return     _second        variable names and return value\cf0 \
-\cf2          *        ------+-----------+-----------+-----------+----->     time scale\cf0 \
-\cf2          *                          R           I           S           motion short names\cf0 \
-\cf2          *              +----D_R----+----d_I----+----d_S----+           D_R (+) d_I (+) d_S\cf0 \
-\cf2          *              +----------D_I----------+----d_S----+           D_I (+) d_S\cf0 \
-\cf2          *              +----------------D_S----------------+           D_S = D_I (+) d_S = D_R (+) d_I (+) d_S\cf0 \
-\cf2          *\cf0 \
-\cf2          * where '`origin`' exists somewhere, but it is irrelevant for the operation of the interpolation.\cf0 \
-\cf2          * According to the schematic, and assuming a generic composition operator (+), the motion composition satisfies\cf0 \
-\cf2          *\cf0 \
-\cf2          *\cf0 \
-\cf2          *   d_I (+) d_S = d_F      (1)\cf0 \
-\cf2          *\cf0 \
-\cf2          * from where `d_I` and `d_S` are first derived. Then, the integrated deltas satisfy\cf0 \
-\cf2          *\cf0 \
-\cf2          *\cf0 \
-\cf2          *   D_I = D_R (+) d_I      (2)\cf0 \
-\cf2          *   D_S = D_F              (3)\cf0 \
-\cf2          *\cf0 \
-\cf2          * from where `D_I` and `D_S` can be derived.\cf0 \
-\cf2          *\cf0 \
-\cf2          * ### Interpolating `d_I`\cf0 \
-\cf2          *\cf0 \
-\cf2          * Equation (1) has two unknowns, `d_I` and `d_S`.\cf0 \
-\cf2          * To solve, we first need to consider the interpolation time,\cf0 \
-\cf2          * `t_I`, that determines `d_I`.\cf0 \
-\cf2          *\cf0 \
-\cf2          * In general, we do not have information about the particular trajectory\cf0 \
-\cf2          * taken between `R = _ref` and `F = _second`.\cf0 \
-\cf2          * Therefore, we consider a linear interpolation.\cf0 \
-\cf2          * The linear interpolation factor `tau` is defined from the time stamps,\cf0 \
-\cf2          *\cf0 \
-\cf2          *\cf0 \
-\cf2          *     tau = (t_I - t_R) / (t_F - t_R)\cf0 \
-\cf2          *\cf0 \
-\cf2          * such that for `tau=0` we are at `R`, and for `tau=1` we are at `F`.\cf0 \
-\cf2          *\cf0 \
-\cf2          * Conceptually, we want an interpolation such that the local motion 'd' takes the fraction,\cf0 \
-\cf2          *\cf0 \
-\cf2          *\cf0 \
-\cf2          *   d_I = tau (*) d_F       // the fraction of the local delta\cf0 \
-\cf2          *\cf0 \
-\cf2          * where again the operator (*) needs to be defined properly.\cf0 \
-\cf2          *\cf0 \
-\cf2          * ### Defining the operators (*), (+), and (-)\cf0 \
-\cf2          *\cf0 \
-\cf2          * We often break down these 'd' and 'D' deltas into chunks of data, e.g.\cf0 \
-\cf2          *\cf0 \
-\cf2          *     \ul dp\ulnone  = delta of position\cf0 \
-\cf2          *     \ul Dp\ulnone  = delta integrated of position\cf0 \
-\cf2          *     \ul dq\ulnone  = delta of \ul quaternion\cf0 \ulnone \
-\cf2          *     \ul Da\ulnone  = delta integrated of orientation angle\cf0 \
-\cf2          *     etc...\cf0 \
-\cf2          *\cf0 \
-\cf2          * which makes it easier to define the operators (+) and (*).\cf0 \
-\cf2          * In effect, defining (*) is now easy. We examine them below.\cf0 \
-\cf2          *\cf0 \
-\cf2          * #### Operator (*)\cf0 \
-\cf2          *\cf0 \
-\cf2          *   - for linear magnitudes, (*) is the regular product *:\cf0 \
-\cf2          *\cf0 \
-\cf2          *         dv_I = tau * dv_F\cf0 \
-\cf2          *\cf0 \
-\cf2          *   - for simple angles, (*) is the regular product:\cf0 \
-\cf2          *\cf0 \
-\cf2          *         da_I = tau * da_F\cf0 \
-\cf2          *\cf0 \
-\cf2          *   - for \ul quaternions\ulnone , we use \ul slerp\ulnone ():\cf0 \
-\cf2          *\cf0 \
-\cf2          *     dq_I = 1.\ul slerp\ulnone (tau, dq_F) // '1' is the unit \ul quaternion\cf0 \ulnone \
-\cf2          *\cf0 \
-\cf2          *\cf0 \
-\cf2          * #### Operator (+)\cf0 \
-\cf2          *\cf0 \
-\cf2          * As for the operator (+), we simply make use of `deltaPlusDelta()`, which is implemented in each derived class.\cf0 \
-\cf2          *\cf0 \
-\cf2          *\cf0 \
-\cf2          * #### Operator (-)\cf0 \
-\cf2          *\cf0 \
-\cf2          * For the operator (-) we have two options. We might implement it explicitly inside interpolate(),\cf0 \
-\cf2          * or through a `deltaMinusDelta()` defined akin to `deltaPlusDelta()`.\cf0 \
-\cf2          *\cf0 \
-\cf2          * By now, this `deltaMinusDelta()` is not enforced by this class as an abstract method,\cf0 \
-\cf2          * and its implementation in derived classes is up to the user.\cf0 \
-\cf2          *\cf0 \
-\cf2          * The general rule for the operator (-) is that it is the inverse of (+).\cf0 \
-\cf2          * But this needs to be taken carefully, since (+) is not commutative in the general case.\cf0 \
-\cf2          * therefore, we must define this inverse in the following way:\cf0 \
-\cf2          *\cf0 \
-\cf2          *     C = A (+) B  <==> B = C (-) A        (4)\cf0 \
-\cf2          *\cf0 \
-\cf2          *           A\cf0 \
-\cf2          *         o--->o\cf0 \
-\cf2          *          \\   |\cf0 \
-\cf2          *         C \\  | B\cf0 \
-\cf2          *            \\ v\cf0 \
-\cf2          *              o\cf0 \
-\cf2          *\cf0 \
-\cf2          * ### Computing `d_S`\cf0 \
-\cf2          *\cf0 \
-\cf2          * Applying (1), we can define\cf0 \
-\cf2          *\cf0 \
-\cf2          *     d_S = d_F (-) d_I\cf0 \
-\cf2          *\cf0 \
-\cf2          * with the (-) operator defined according to (4), that is, `d_F = d_I (+) d_S`, as can be also observed in the sketch above.\cf0 \
-\cf2          *\cf0 \
-\cf2          * For simple pose increments, we can use a local implementation:\cf0 \
-\cf2          *\cf0 \
-\cf2          *   - for 2D\cf0 \
-\cf2          *\cf0 \
-\cf2          *     dp_S = dR_I.tr * (1-tau)*dp_F      // dR is the rotation matrix of the angle delta '\ul da\ulnone '; '\ul tr\ulnone ' is transposed\cf0 \
-\cf2          *     da_S = dR_I.tr * (1-tau)*da_F\cf0 \
-\cf2          *\cf0 \
-\cf2          *   - for 3D\cf0 \
-\cf2          *\cf0 \
-\cf2          *     dp_S = dq_I.conj * (1-tau)*dp_F    // \ul dq\ulnone  is a \ul quaternion\ulnone ; '\ul conj\ulnone ' is the \ul conjugate\ulnone  \ul quaternion\ulnone .\cf0 \
-\cf2          *     dq_S = dq_I.conj * dq_F\cf0 \
-\cf2          *\cf0 \
-\cf2          *\cf0 \
-\cf2          * Please refer to the examples at the end of this documentation for the computation of `d_S`.\cf0 \
-\cf2          *\cf0 \
-\cf2          * ### Computing `D_I`\cf0 \
-\cf2          *\cf0 \
-\cf2          * Conceptually, the global motion 'D' is interpolated, that is:\cf0 \
-\cf2          *\cf0 \
-\cf2          *     D_I = (1-tau) (*) D_R (+) tau (*) D_F  // the interpolation of the global Delta\cf0 \
-\cf2          *\cf0 \
-\cf2          * However, we better make use of (2) and write\cf0 \
-\cf2          *\cf0 \
-\cf2          *     D_I = D_R (+) d_I\cf0 \
-\cf2          *         = deltaPlusDelta(D_R, d_I)         // This form provides an easy implementation.\cf0 \
-\cf2          *\cf0 \
-\cf2          *\cf0 \
-\cf2          * ### Examples\cf0 \
-\cf2          *\cf0 \
-\cf2          * #### Example 1: For 2D poses\cf0 \
-\cf2          *\cf0 \
-\cf2          *\cf0 \
-\cf2          *     t_I  = _ts                         // time stamp of the interpolated motion\cf0 \
-\cf2          *     tau = (t_I - t_R) / (t_F - t_R)    // interpolation factor\cf0 \
-\cf2          *\cf0 \
-\cf2          *     dp_I = tau*dp_F                    // \ul dp\ulnone  is a 2-vector\cf0 \
-\cf2          *     da_I = tau*da_F                    // \ul da\ulnone  is an angle, for 2D poses\cf0 \
-\cf2          *\cf0 \
-\cf2          *     D_I  = deltaPlusDelta(D_R, d_I)\cf0 \
-\cf2          *\cf0 \
-\cf2          *     dp_S = dR_I.tr * (1-tau)*dp_F      // dR.tr is the transposed rotation matrix corresponding to '\ul da\ulnone ' above\cf0 \
-\cf2          *     da_S = dR_I.tr * (1-tau)*da_F\cf0 \
-\cf2          *\cf0 \
-\cf2          *     D_S  = D_F\cf0 \
-\cf2          *\cf0 \
-\cf2          * #### Example 2: For 3D poses\cf0 \
-\cf2          *\cf0 \
-\cf2          * Orientation is in \ul quaternion\ulnone  form, which is the best for interpolation using `\ul slerp\ulnone ()` :\cf0 \
-\cf2          *\cf0 \
-\cf2          *     t_I  = _ts                         // time stamp of the interpolated motion\cf0 \
-\cf2          *     tau = (t_I - t_R) / (t_F - t_R)    // interpolation factor\cf0 \
-\cf2          *\cf0 \
-\cf2          *     dp_I = tau*dp_F                    // \ul dp\ulnone  is a 3-vector\cf0 \
-\cf2          *     dq_I = 1.\ul slerp\ulnone (tau, dq_F)          // '1' is the identity \ul quaternion\ulnone ; \ul slerp\ulnone () interpolates 3D rotation.\cf0 \
-\cf2          *\cf0 \
-\cf2          *     D_I  = deltaPlusDelta(D_R, d_I)\cf0 \
-\cf2          *\cf0 \
-\cf2          *     dp_S = dq_I.conj * (1-tau)*dp_F    // \ul dq\ulnone  is a \ul quaternion\ulnone ; '\ul conj\ulnone ' is the \ul conjugate\ulnone  \ul quaternion\ulnone .\cf0 \
-\cf2          *     dq_S = dq_I.conj * dq_F\cf0 \
-\cf2          *\cf0 \
-\cf2          *     D_S  = D_F\cf0 \
-\cf2          *\cf0 \
-\cf2          */\cf0 \
-        \cf2 /* //\cf3 TODO\cf2 : JS: Remove these instructions since we will remove \ul covariances\ulnone  from Motion.\cf0 \
-\cf2          *\cf0 \
-\cf2          * ### \ul Covariances\cf0 \ulnone \
-\cf2          *\cf0 \
-\cf2          * The Motion structure adds local and global \ul covariances\ulnone , that we rename as,\cf0 \
-\cf2          *\cf0 \
-\cf2          *     dC: delta_cov_\cf0 \
-\cf2          *     DC: delta_integr_cov_\cf0 \
-\cf2          *\cf0 \
-\cf2          * and which are integrated as follows\cf0 \
-\cf2          *\cf0 \
-\cf2          *     dC_I = tau * dC_F\cf0 \
-\cf2          *     DC_I = (1-tau) * DC_R + tau * dC_F = DC_R + dC_I\cf0 \
-\cf2          *\cf0 \
-\cf2          * and\cf0 \
-\cf2          *\cf0 \
-\cf2          *     dC_S = (1-tau) * dC_F\cf0 \
-\cf2          *     DC_S = DC_F\cf0 \
-\cf2          *\cf0 \
-\cf2          */\cf0 \
-}
\ No newline at end of file
diff --git a/doc/main.dox b/doc/main.dox
index 62090c8648315200e0ba9b726d55048d74406425..efde8f91eab56973935fe8efa85d64e6dd558b48 100644
--- a/doc/main.dox
+++ b/doc/main.dox
@@ -1,258 +1,29 @@
 // EDIT CAPS TEXTS
 /*! \mainpage WOLF
-
-    \section Description
     
-    WOLF (Windowed Localization Frames) is a software library ...
-
-    \section Main Main Features
+    WOLF (Windowed Localization Frames) is a C++ framework for state estimation based on factor graphs.
     
-    - Uses Eigen3.2. as an algebra engine
-    - Suited for Filtering and Optimization approaches
-    - Allows different state spaces configurations
-    - Minimizes read/write of state vectors and matrix to memory
-
-    \section Contents
-
-    - \link Installation Installation       \endlink
-    - \link how_to       How To             \endlink
-    - \link Performance  Performance		\endlink
-    - \link faq          FAQ                \endlink
-    - \link todo         To Do              \endlink
-    - \link other        OTHER STUFF FTW 	\endlink
-  .
-
-
-  \section Authors
-    Joan Solà (jsola (_at_) iri.upc.edu)
-  
-    Andreu Corominas (acorominas (_at_) iri.upc.edu)
-
-    Faust Terrado  (fterrado (_at_) iri.upc.edu)
+    This is the Doxygen documentation of the  **WOLF core**  library.
     
-    Àngel Santamaria  (asantamaria (_at_) iri.upc.edu)
-
-    Joan Vallvé  (jvallve (_at_) iri.upc.edu)
-   
-    \section License
-
-    TO BE DISCUSSED :
-    This package is licensed under a
-    <a href="http://www.gnu.org/licenses/lgpl.html" target=”_blank”>
-    LGPL 3.0 License</a>.
-
-    \section Disclaimer
-
-    This is free software: you can redistribute it and/or modify
-    it under the terms of the GNU Lesser General Public License as published by
-    the Free Software Foundation, either version 3 of the License, or
-    at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU Lesser General Public License for more details.
-
-    You should have received a copy of the GNU Lesser General Public License
-    along with this program.  If not, see <http://www.gnu.org/licenses/>.
-
-    \section References References and External Documentation
+    If you are looking for one of the following:
     
-    ... Nothing written here yet ...
-
-*/
-
-
-
-// HERE STARTS THE FUN (THE DIFFERENT PAGES)
-
-// TITLE
-// MINI PAGES MENU
-// CONTENT (SECTIONS...)
-
-// ADDING PAGES:
-//  - add the new page in the menus of each page
-//  - current page is bold "\b", not a \link!
-
-
-/*! \page Installation Installation
-\b Installation | \link how_to How To \endlink | \link Performance \endlink | \link faq FAQ \endlink | \link todo To Do \endlink | \link other OTHER STUFF FTW \endlink
-
-
-To install the WOLF library, please follow the steps below: 
-
-\section Download
-
-Checkout the source code from the IRI public repository
-\code
-cd <your_developing_folder>
-svn checkout https://devel.iri.upc.edu/labrobotica/algorithms/wolf wolf
-\endcode
-
-\section Compilation
-
-After downloading, change directory to wolf/build and execute cmake, 
-\code
-cd build
-cmake ..
-\endcode
-compile the shared library (wolf.so) and the example programs, 
-\code
-make
-\endcode
-
-<!--
-The <em>cmake</em> only need to be executed once (make will automatically call
-<em>cmake</em> if you modify one of the <em>CMakeList.txt</em> files).
--->
-
-Optionally, if you want to generate this documentation in your local machine (uses doxygen default style type), 
-\code
-make doc
-\endcode <!--OTHER-->
-<!-- 
-- To generate this documentation with IRI-DOC style type
-\code
-cd doc
-svn checkout https://devel.iri.upc.edu/labrobotica/tools/iri_doc
-cd ../build; rm -rf *; cmake ..; make; make doc
-\endcode
--->
-
-Please note that the files in the <em>build</em> directory are genetated by <em>cmake</em>
-and <em>make</em> and can be safely removed.
-After doing so you will need to call cmake manually again.
-
-\section Installing_subsec Installing 
-
-In order to be able to use the library, it is necessary to copy it into the system.
-To do that, execute, from the build directory
-\code
-sudo make install
-\endcode
-
-as root and the shared libraries will be copied to <em>/usr/local/lib/iridrivers</em> directory
-and the header files will be copied to <em>/usr/local/include/iridrivers</em> dierctory. At
-this point, the library may be used by any user.
-
-To remove the library from the system, execute
-\code
-sudo make uninstall
-\endcode
-as root, and all the associated files will be removed from the system.
-OTHER
-
-\section Configuration
-
-The default build mode is DEBUG. That is, objects and executables
-include debug information.
-
-The RELEASE build mode optimizes for speed. To build in this mode
-execute
-\code
-cmake .. -DCMAKE_BUILD_TYPE=RELEASE
-\endcode
-The release mode will be kept until next time cmake is executed.
-
-*/
-
-
-
-/*! \page how_to How to use this library
-\link Installation \endlink | \b How \b To | \link Performance \endlink | \link faq FAQ \endlink | \link todo To Do \endlink | \link other OTHER STUFF FTW \endlink
-
-You can use only the functions provided by the library after \link Installation install this library \endlink.
-
-If you need to use this library in your code, you'll have to add these lines into your CMakeLists.txt file.
-
-- first it is necessary to locate if required libraries have been installed or not using the following command (TO DO)
-\code
-FIND_PACKAGE(wolf REQUIRED)
-\endcode
-- In the case that the packages are present, it is necessary to add the header files directory to
-the include directory path by using
-\code
-INCLUDE_DIRECTORIES(${wolf_INCLUDE_DIR})
-\endcode
-- Finally, it is also necessary to link with the desired libraries by using the following command
-\code
-TARGET_LINK_LIBRARIES(<executable name> ${wolf_LIBRARY})
-\endcode
-.
-
-<!-- 
-All these steps are automatically done when the new project is created following the instructions
-in <A href="http://wikiri.upc.es/index.php/Create_a_new_development_project" target=”_blank”>here</a>
--->
-
-From the programming point of view the following example illustrates how to use wolf:
-\code
-#include "state_pvq.h"
-int main()
-{
-    VectorXd storage(20);
-    unsigned int index = 0, bsize = 4;
-
-    //set storage vector
-    storage << 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19;
-
-    VectorXd vec_pvq(StatePVQ::SIZE_);
-    vec_pvq << 20, 21, 22, 23, 24, 25, 26, 27, 28, 29;
-
-    StatePVQ pvq(storage, index + bsize, vec_pvq);
-    cout << "storage   : " << storage.transpose() << endl;
-    cout << "pvq  state: " << pvq.x().transpose() << endl;
-    cout << "pvq  pos  : " << pvq.p().transpose() << endl;
-    cout << "pvq  vel  : " << pvq.v().transpose() << endl;
-    cout << "pvq  quat : " << pvq.q().coeffs().transpose() << endl;
-
-    VectorXd vec_pvq2(StatePVQ::SIZE_);
-    vec_pvq2 << 30, 31, 32, 33, 34, 35, 36, 37, 38, 39;
-    pvq.x(vec_pvq2);
-    cout << "storage   : " << storage.transpose() << endl;
-    cout << "pvq  state: " << pvq.x().transpose() << endl;
-
-
-    return 0;
-}
-\endcode
-
-For further code examples, please see the files at src/examples where there are some "main()" instances that make use of the library in different useful ways.
-
-*/
-
-/*! \page Performance Performance
-\link Installation \endlink | \link how_to How To \endlink | \b Performance | \link faq FAQ \endlink | \link todo To Do \endlink | \link other OTHER STUFF FTW \endlink
-
-The following plot shows the processing time spent to compute ...
-
- <!-- \image html "images/xxx.png" -->
-*/ 
-
-/*! \page faq Frequently Asked Questions
-\link Installation \endlink | \link how_to How To \endlink | \link Performance \endlink | \b FAQ | \link todo To Do \endlink | \link other OTHER STUFF FTW \endlink
-
-Do I need some extra libraries to compile WOLF ?
-- Yes, please take a look at the \link Installation \endlink page.
-
-*/
-
-
-// THIS IS SHOWN BEFORE THE TO DO AUTOMATIC PAGE
-
-/*! \page todo To Do
-\link Installation \endlink | \link how_to How To \endlink | \link Performance \endlink | \link faq FAQ \endlink| \b To \b Do | \link other OTHER STUFF FTW \endlink
-- some to do 
-- another to do
-*/
-
-
-
-
-/*! \page other OTHER STUFF FTW
-\link Installation \endlink | \link how_to How To \endlink | \link Performance \endlink | \link faq FAQ \endlink | \link todo To Do \endlink | \b OTHER \b STUFF \b FTW
-
-HERE IS YOUR CONTENT FOR THE WIN
-
-
+    - full documentation
+    - installation instructions
+    - access to the repositories
+    - WOLF plugins
+    - ROS packages
+     
+    please visit the main WOLF page at http://www.iri.upc.edu/wolf.
+    
+    WOLF is copyright 2015--2022
+    
+    - Joan Solà
+    - Joan Vallvé
+    - Joaquim Casals
+    - Jérémie Deray
+    - Médéric Fourmy
+    - Dinesh Atchuthan
+    - Andreu Corominas-Murtra
+    
+    at IRI-CSIC-UPC.
 */
diff --git a/include/vision/capture/capture_image.h b/include/vision/capture/capture_image.h
index 367d230e8710aa945b7fe462fc32d0942a244c54..c31f03fb61799b4e3fdf98d0fa787f1c59e2dda2 100644
--- a/include/vision/capture/capture_image.h
+++ b/include/vision/capture/capture_image.h
@@ -19,20 +19,66 @@
 // along with this program.  If not, see <http://www.gnu.org/licenses/>.
 //
 //--------LICENSE_END--------
+/**
+ * \file capture_image.h
+ *
+ *  Created on: March 31, 2022
+ *      \author: mfourmy
+ */
+
 #ifndef CAPTURE_IMAGE_H
 #define CAPTURE_IMAGE_H
 
+// OpenCV includes
+#include <opencv2/core.hpp>
+
+
 //Wolf includes
 #include <core/capture/capture_base.h>
-#include "vision/feature/feature_point_image.h"
 #include "vision/sensor/sensor_camera.h"
 
-// Vision Utils includes
-#include <vision_utils/vision_utils.h>
-#include <vision_utils/common_class/frame.h>
 
 namespace wolf {
 
+WOLF_PTR_TYPEDEFS(WKeyPoint);
+class WKeyPoint 
+{
+    private:
+        static size_t id_count_;  // class attribute automatically incremented when a new WKeyPoint object is instantiated
+        size_t id_;
+        cv::KeyPoint cv_kp_;
+        cv::Mat desc_;
+    
+    public:
+
+        WKeyPoint();
+        WKeyPoint(const cv::KeyPoint& _cv_kp);
+
+        size_t getId() const {return id_;}
+        void setId(size_t _id) {id_ = _id;}
+
+        cv::KeyPoint getCvKeyPoint() const {return cv_kp_;}
+        void setCvKeyPoint(cv::KeyPoint _cv_kp) {cv_kp_ = _cv_kp;}
+
+        Eigen::Vector2d getEigenKeyPoint() const {return Eigen::Vector2d(cv_kp_.pt.x, cv_kp_.pt.y);}
+        cv::Point2d getCvPoint() const {return cv_kp_.pt;}
+
+        cv::Mat getDescriptor() const {return desc_;}
+        void setDescriptor(cv::Mat _desc) {desc_ = _desc;}
+
+        // Only used for gtest, should be moved
+        static void resetIdCount() {id_count_ = 0;}
+};
+
+
+// This map is frame specific and enables to recover a Wolf KeyPoint with a certain
+// ID in a frame
+typedef std::unordered_map<size_t, WKeyPoint> KeyPointsMap;
+
+// This maps the IDs of the Wolf KeyPoints that are tracked from a frame to the other.
+// It takes the ID of a WKeyPoint and returns the ID of its track in another Frame.
+typedef std::unordered_map<size_t, size_t> TracksMap;
+
 // Set ClassPtr, ClassConstPtr and ClassWPtr typedefs;
 WOLF_PTR_TYPEDEFS(CaptureImage);
     
@@ -44,29 +90,53 @@ WOLF_PTR_TYPEDEFS(CaptureImage);
  */
 class CaptureImage : public CaptureBase
 {
-    protected:
-        vision_utils::Frame frame_;
+    private:
+        cv::Mat img_;
 
-    public:
-        vision_utils::FeatureIdxGridPtr grid_features_;
-        KeyPointVector                  keypoints_;
-        cv::Mat                         descriptors_;
-        DMatchVector                    matches_from_precedent_;
-        std::vector<double>             matches_normalized_scores_;
-        std::map<int, int>              map_index_to_next_;
-        cv::Mat                         global_descriptor_;
+        // Keypoints associated to the capture 
+        KeyPointsMap mwkps_;
+
+        // descriptors of the keypoints if necessary.
+        // number of rows == mwkps_.size() if initialized
+        cv::Mat descriptors_;
+
+        // keeps track from the origin capture (origin->incoming): used for outlier detection
+        TracksMap tracks_origin_;
+
+        // keeps track from the previous capture (last->incoming): used by the rest of the processor to populate the track matrix
+        TracksMap tracks_prev_;
+
+        bool last_was_repopulated_;
 
     public:
         CaptureImage(const TimeStamp& _ts, SensorCameraPtr _camera_ptr, const cv::Mat& _data_cv);
         ~CaptureImage() override;
 
         const cv::Mat& getImage() const;
-        void setDescriptors(const cv::Mat &_descriptors);
-        void setKeypoints(const std::vector<cv::KeyPoint>& _keypoints);
-        cv::Mat& getDescriptors();
-        std::vector<cv::KeyPoint>& getKeypoints();
-        void setGlobalDescriptor(const cv::Mat &_global_descriptor);
-        cv::Mat& getGlobalDescriptor();
+        void setImage(const cv::Mat& _img);
+
+        const KeyPointsMap getKeyPoints() const {return mwkps_;}
+        void setKeyPoints(const KeyPointsMap& _mwkps){mwkps_ = _mwkps;}
+
+        const TracksMap& getTracksPrev() const {return tracks_prev_;}
+        void setTracksPrev(const TracksMap& _tracks){tracks_prev_ = _tracks;}
+
+        const TracksMap& getTracksOrigin() const {return tracks_origin_;}
+        void setTracksOrigin(const TracksMap& _tracks){tracks_origin_ = _tracks;}
+
+        bool getLastWasRepopulated() const {return last_was_repopulated_;}
+        void setLastWasRepopulated(bool _repopulated){last_was_repopulated_ = _repopulated;}
+
+        void addKeyPoint(const WKeyPoint& _wkp);
+        void addKeyPoint(const cv::KeyPoint& _cv_kp);
+
+        void addKeyPoints(const std::vector<WKeyPoint>& _vec_wkp);
+        void addKeyPoints(const std::vector<cv::KeyPoint>& _vec_cv_kp);
+        void addKeyPoints(const KeyPointsMap& _mwkps);
+
+        void removeKeyPoint(size_t _kp_id);
+        void removeKeyPoint(const WKeyPoint& _wkp);
+
 };
 
 } // namespace wolf
diff --git a/include/vision/factor/factor_ahp.h b/include/vision/factor/factor_ahp.h
index 4c47c0912e357b79cc2728c1cedb4555467e227b..833a3f3c34fb58856af214444f78a172d51b3225 100644
--- a/include/vision/factor/factor_ahp.h
+++ b/include/vision/factor/factor_ahp.h
@@ -103,9 +103,9 @@ inline FactorAhp::FactorAhp(const FeatureBasePtr&   _ftr_ptr,
 
 inline Eigen::VectorXd FactorAhp::expectation() const
 {
-    FrameBasePtr frm_current = getFeature()->getCapture()->getFrame();
-    FrameBasePtr frm_anchor  = getFrameOther();
-    LandmarkBasePtr lmk      = getLandmarkOther();
+    auto frm_current = getFeature()->getCapture()->getFrame();
+    auto frm_anchor  = getFrameOther();
+    auto lmk         = getLandmarkOther();
 
     const Eigen::MatrixXd frame_current_pos = frm_current->getP()->getState();
     const Eigen::MatrixXd frame_current_ori = frm_current->getO()->getState();
@@ -151,7 +151,7 @@ inline void FactorAhp::expectation(const T* const _current_frame_p,
     TransformType       T_R0_C0 = t_r0_c0 * q_r0_c0;
 
     // current robot to current camera transform
-    CaptureBasePtr      current_capture = this->getFeature()->getCapture();
+    auto                current_capture = this->getFeature()->getCapture();
     Translation<T, 3>   t_r1_c1  (current_capture->getSensorP()->getState().cast<T>());
     Quaterniond         q_r1_c1_s(Eigen::Vector4d(current_capture->getSensorO()->getState()));
     Quaternion<T>       q_r1_c1 = q_r1_c1_s.cast<T>();
diff --git a/include/vision/factor/factor_pixel_hp.h b/include/vision/factor/factor_pixel_hp.h
index 0f7e18ed2708467dafab8655303c7a97f49bbb15..8693b5f7262e29c61ffaab3c573fa8b19ff33cd9 100644
--- a/include/vision/factor/factor_pixel_hp.h
+++ b/include/vision/factor/factor_pixel_hp.h
@@ -46,10 +46,10 @@ class FactorPixelHp : public FactorAutodiff<FactorPixelHp, 2, 3, 4, 3, 4, 4>
     public:
 
         FactorPixelHp(const FeatureBasePtr&   _ftr_ptr,
-                      const LandmarkHpPtr&   _landmark_ptr,
+                      const LandmarkHpPtr&    _landmark_ptr,
                       const ProcessorBasePtr& _processor_ptr,
-                      bool              _apply_loss_function,
-                      FactorStatus  _status = FAC_ACTIVE);
+                      bool                    _apply_loss_function,
+                      FactorStatus            _status = FAC_ACTIVE);
 
         ~FactorPixelHp() override = default;
 
@@ -94,26 +94,28 @@ inline FactorPixelHp::FactorPixelHp(const FeatureBasePtr&   _ftr_ptr,
                                                         _landmark_ptr->getP()),
         intrinsic_(_ftr_ptr->getCapture()->getSensor()->getIntrinsic()->getState())
 {
-//	std::cout << "FactorPixelHp::Constructor\n";
-    // obtain some intrinsics from provided sensor
     distortion_ = (std::static_pointer_cast<SensorCamera>(_ftr_ptr->getCapture()->getSensor()))->getDistortionVector();
 }
 
 inline Eigen::VectorXd FactorPixelHp::expectation() const
 {
-    FrameBasePtr frm = getFeature()->getCapture()->getFrame();
-    SensorBasePtr sen  = getFeature()->getCapture()->getSensor();
-    LandmarkBasePtr lmk      = getLandmarkOther();
-
-    const Eigen::MatrixXd frame_pos = frm->getP()->getState();
-    const Eigen::MatrixXd frame_ori = frm->getO()->getState();
-    const Eigen::MatrixXd sensor_pos  = sen ->getP()->getState();
-    const Eigen::MatrixXd sensor_ori  = sen ->getO()->getState();
-    const Eigen::MatrixXd lmk_pos_hmg       = lmk        ->getP()->getState();
+    auto frm  = getFeature()->getCapture()->getFrame();
+    auto sen  = getFeature()->getCapture()->getSensor();
+    auto lmk  = getLandmarkOther();
+
+    const Eigen::MatrixXd frame_pos     = frm->getP()->getState();
+    const Eigen::MatrixXd frame_ori     = frm->getO()->getState();
+    const Eigen::MatrixXd sensor_pos    = sen->getP()->getState();
+    const Eigen::MatrixXd sensor_ori    = sen->getO()->getState();
+    const Eigen::MatrixXd lmk_pos_hmg   = lmk->getP()->getState();
 
     Eigen::Vector2d exp;
-    expectation(frame_pos.data(), frame_ori.data(), sensor_pos.data(), sensor_ori.data(),
-    			lmk_pos_hmg.data(), exp.data());
+    expectation(frame_pos.data(),
+                frame_ori.data(),
+                sensor_pos.data(),
+                sensor_ori.data(),
+                lmk_pos_hmg.data(),
+                exp.data());
 
     return exp;
 }
@@ -128,48 +136,35 @@ inline void FactorPixelHp::expectation(const T* const _frame_p,
 {
     using namespace Eigen;
 
-    // All involved transforms typedef
-    typedef Eigen::Transform<T, 3, Eigen::Isometry> TransformType;
-
-    // world to current robot transform
-    Map<const Matrix<T, 3, 1> > p_w_r(_frame_p);
-    Translation<T, 3>           t_w_r(p_w_r);
-    Map<const Quaternion<T> >   q_w_r(_frame_o);
-    TransformType               T_w_r = t_w_r * q_w_r;
-
-    // current robot to current camera transform
-    Map<const Matrix<T, 3, 1> > p_r_c(_sensor_p);
-    Translation<T, 3>           t_r_c(p_r_c);
-    Map<const Quaternion<T> >  	q_r_c(_sensor_o);
-    TransformType       		T_r_c = t_r_c * q_r_c;
-
-    // hmg point in current camera frame C
-    Eigen::Map<const Eigen::Matrix<T, 4, 1> > landmark_hmg(_lmk_hmg);
-    Eigen::Matrix<T, 4, 1> landmark_hmg_c = T_r_c .inverse(Eigen::Isometry)
-                                           * T_w_r .inverse(Eigen::Isometry)
-                                           * landmark_hmg;
+    // frames w: world; r: robot; c: camera
+    Map<const Matrix<T, 3, 1> > p_wr(_frame_p);
+    Map<const Quaternion<T> >   q_wr(_frame_o);
+    Map<const Matrix<T, 3, 1> > p_rc(_sensor_p);
+    Map<const Quaternion<T> >   q_rc(_sensor_o);
 
-    //std::cout << "p_w_r = \n\t" << _frame_p[0] << "\n\t" << _frame_p[1] << "\n\t" << _frame_p[2] << "\n";
-//    std::cout << "q_w_r = \n\t" << _frame_o[0] << "\n\t" << _frame_o[1] << "\n\t" << _frame_o[2] << "\n\t" << _frame_o[3] << "\n";
-//    std::cout << "p_r_c = \n\t" << _sensor_p[0] << "\n\t" << _sensor_p[1] << "\n\t" << _sensor_p[2] << "\n";
-//    std::cout << "q_r_c = \n\t" << _sensor_o[0] << "\n\t" << _sensor_o[1] << "\n\t" << _sensor_o[2] << "\n\t" << _sensor_o[3] << "\n";
-//    std::cout << "landmark_hmg_c = \n\t" << landmark_hmg_c(0) << "\n\t" << landmark_hmg_c(1) << "\n\t" << landmark_hmg_c(2) << "\n\t" << landmark_hmg_c(3) << "\n";
+    // camera pose in world frame: transforms from camera to world
+    Matrix<T, 3, 1> p_wc = p_wr + q_wr * p_rc;
+    Quaternion<T>   q_wc = q_wr*q_rc;
 
-    // lmk direction vector
-    Eigen::Matrix<T, 3, 1> v_dir = landmark_hmg_c.head(3);
+    // invert transform: from world to camera
+    // | R T |.inv = | R.tr -R.tr*T | == | q.conj -q.conj*T |
+    // | 0 1 |       |  0      1    |    |   0        1     |
+    Quaternion<T>   q_cw = q_wc.conjugate();
+    Matrix<T, 3, 1> p_cw = - (q_cw * p_wc);
 
-    // lmk inverse distance
-    T rho = landmark_hmg_c(3);
+    // landmark hmg
+    Matrix<T, 4, 1> lh_w(_lmk_hmg);
 
-    // camera parameters
-    Matrix<T, 4, 1> intrinsic = intrinsic_.cast<T>();
-    Eigen::Matrix<T, Eigen::Dynamic, 1> distortion = distortion_.cast<T>();
+    // landmark dir vector in C frame
+    /* note: transforming hmg point to get direction vector v':
+     * | q T | * | v | = | q*v + T*w | --> v' = q*v + T*w
+     * | 0 1 |   | w |   |     w     |
+     */
+    Matrix<T, 3, 1> v_dir = q_cw * lh_w.template head<3>() + p_cw * lh_w(3);
 
     // project point and exit
     Eigen::Map<Eigen::Matrix<T, 2, 1> > expectation(_expectation);
-    expectation = pinhole::projectPoint(intrinsic, distortion, v_dir/rho);
-
-//    std::cout << "expectation = \n\t" << expectation(0) << "\n\t" << expectation(1) << "\n";
+    expectation = pinhole::projectPoint(intrinsic_, distortion_, v_dir);
 
 }
 
@@ -185,12 +180,10 @@ inline bool FactorPixelHp::operator ()(const T* const _frame_p,
     Eigen::Matrix<T, 2, 1> expected;
     expectation(_frame_p, _frame_o, _sensor_p, _sensor_o, _lmk_hmg, expected.data());
 
-    // measured
-    Eigen::Matrix<T, 2, 1> measured = getMeasurement().cast<T>();
-
     // residual
     Eigen::Map<Eigen::Matrix<T, 2, 1> > residuals(_residuals);
-    residuals = getMeasurementSquareRootInformationUpper().cast<T>() * (expected - measured);
+    residuals = getMeasurementSquareRootInformationUpper() * (expected - getMeasurement());
+
     return true;
 }
 
diff --git a/include/vision/factor/factor_trifocal.h b/include/vision/factor/factor_trifocal.h
index 646a7fe664475eaac7d23be4617d0c95099866fb..b2cf8514edcfef30f093d8f3c29d3f8a50114076 100644
--- a/include/vision/factor/factor_trifocal.h
+++ b/include/vision/factor/factor_trifocal.h
@@ -44,16 +44,17 @@ class FactorTrifocal : public FactorAutodiff<FactorTrifocal, 3, 3, 4, 3, 4, 3, 4
         /** \brief Class constructor
          */
         FactorTrifocal(const FeatureBasePtr& _feature_1_ptr,
-                               const FeatureBasePtr& _feature_2_ptr,
-                               const FeatureBasePtr& _feature_own_ptr,
-                               const ProcessorBasePtr& _processor_ptr,
-                               bool _apply_loss_function,
-                               FactorStatus _status);
+                       const FeatureBasePtr& _feature_2_ptr,
+                       const FeatureBasePtr& _feature_own_ptr,
+                       const ProcessorBasePtr& _processor_ptr,
+                       bool _apply_loss_function,
+                       FactorStatus _status);
 
         /** \brief Class Destructor
          */
         ~FactorTrifocal() override;
 
+        FeatureBaseConstPtr getFeaturePrev() const;
         FeatureBasePtr getFeaturePrev();
 
         const Vector3d& getPixelCanonical3() const
@@ -169,10 +170,10 @@ FactorTrifocal::FactorTrifocal(const FeatureBasePtr& _feature_1_ptr,
         FactorAutodiff( "TRIFOCAL PLP",
                         TOP_GEOM,
                         _feature_own_ptr,
-                        nullptr,
-                        nullptr,
-                        _feature_2_ptr, //< this sets feature 2 (the one between the oldest and the newest)
-                        nullptr,
+                        FrameBasePtrList(),
+                        CaptureBasePtrList(),
+                        FeatureBasePtrList({_feature_2_ptr, _feature_1_ptr}), //< this sets features 2 and 1 (2 is the one between the oldest and the newest)
+                        LandmarkBasePtrList(),
                         _processor_ptr,
                         _apply_loss_function,
                         _status,
@@ -189,7 +190,7 @@ FactorTrifocal::FactorTrifocal(const FeatureBasePtr& _feature_1_ptr,
         sqrt_information_upper(Matrix3d::Zero())
 {
     // First add feature_1_ptr to the list of features (because the constructor FeatureAutodiff did not do so)
-    if (_feature_1_ptr) feature_other_list_.push_back(_feature_1_ptr);
+    //if (_feature_1_ptr) feature_other_list_.push_back(_feature_1_ptr);
 
     // Store some geometry elements
     Matrix3d K_inv          = camera_ptr_->getIntrinsicMatrix().inverse();
diff --git a/include/vision/feature/feature_point_image.h b/include/vision/feature/feature_point_image.h
index fd04f7ce3e762003b6b2db687339924137e8f931..0cf4941be09c03c3abb33b0c5042ff56cdd3fb07 100644
--- a/include/vision/feature/feature_point_image.h
+++ b/include/vision/feature/feature_point_image.h
@@ -19,14 +19,17 @@
 // along with this program.  If not, see <http://www.gnu.org/licenses/>.
 //
 //--------LICENSE_END--------
-#ifndef FEATURE_IMAGE_H
-#define FEATURE_IMAGE_H
+
+#ifndef FEATURE_POINT_IMAGE_H
+#define FEATURE_POINT_IMAGE_H
+
+// OpenCV includes
+#include <opencv2/core.hpp>
 
 //Wolf includes
 #include "core/feature/feature_base.h"
+#include "vision/capture/capture_image.h"
 
-// Vision Utils includes
-#include <vision_utils/vision_utils.h>
 
 namespace wolf {
 
@@ -36,123 +39,26 @@ WOLF_PTR_TYPEDEFS(FeaturePointImage);
 class FeaturePointImage : public FeatureBase
 {
     private:
-        cv::KeyPoint keypoint_; ///< Warning: every write operation to this member needs to write measurement_. See setKeypoint() as an example.
-        unsigned int index_keypoint_;
-        cv::Mat descriptor_;
-        bool is_known_;
-        double score_;
-        cv::Rect tracker_roi_;
-
+        WKeyPoint kp_;
+        
     public:
-
-        /// Constructor from Eigen measured pixel
-        FeaturePointImage(const Eigen::Vector2d & _measured_pixel,
-                          const unsigned int& _index_keypoint,
-                          const cv::Mat& _descriptor,
-                          const Eigen::Matrix2d& _meas_covariance) :
-                FeatureBase("POINT IMAGE", _measured_pixel, _meas_covariance),
-                keypoint_(_measured_pixel(0), _measured_pixel(1), 1), // Size 1 is a dummy value
-                index_keypoint_(_index_keypoint),
-                descriptor_( _descriptor),
-                is_known_(false),
-                score_(0)
-        {
-            keypoint_.pt.x = measurement_(0);
-            keypoint_.pt.y = measurement_(1);
-        }
-
         /// Constructor from OpenCV measured keypoint
-        FeaturePointImage(const cv::KeyPoint& _keypoint,
-                          const unsigned int& _index_keypoint,
-                          const cv::Mat& _descriptor,
-                          const Eigen::Matrix2d& _meas_covariance) :
-                FeatureBase("POINT IMAGE", Eigen::Vector2d::Zero(), _meas_covariance),
-                keypoint_(_keypoint),
-                index_keypoint_(_index_keypoint),
-                descriptor_(_descriptor),
-                is_known_(false),
-                score_(0)
-        {
-            measurement_(0) = double(_keypoint.pt.x);
-            measurement_(1) = double(_keypoint.pt.y);
-        }
+        FeaturePointImage(const WKeyPoint& _keypoint,
+                          const Eigen::Matrix2d& _meas_covariance);
 
        ~FeaturePointImage() override;
 
-        const cv::KeyPoint& getKeypoint() const;
-        void setKeypoint(const cv::KeyPoint& _kp);
-
-        const cv::Mat& getDescriptor() const;
-        void setDescriptor(const cv::Mat& _descriptor);
-
-        SizeStd getIndexKeyPoint() const
-        { return index_keypoint_; }
-
-        bool isKnown();
-        void setIsKnown(bool _is_known);
-
-        /*Eigen::VectorXd & getMeasurement(){
-            measurement_(0) = double(keypoint_.pt.x);
-            measurement_(1) = double(keypoint_.pt.y);
-            return measurement_;
-        }*/
-
-        const cv::Rect& getTrackerRoi() const;
-        void setTrackerRoi(const cv::Rect& tracker_roi);
-
-        double getScore() const
-        {
-            return score_;
-        }
-
-        void setScore(double score)
-        {
-            score_ = score;
-        }
+        const WKeyPoint& getKeyPoint() const {return kp_;}
+        void setKeyPoint(const WKeyPoint& _kp);
 };
 
-inline const cv::KeyPoint& FeaturePointImage::getKeypoint() const
-{
-    return keypoint_;
-}
-
-inline void FeaturePointImage::setKeypoint(const cv::KeyPoint& _kp)
-{
-    keypoint_ = _kp;
-    measurement_(0) = _kp.pt.x;
-    measurement_(1) = _kp.pt.y;
-}
-
-inline const cv::Mat& FeaturePointImage::getDescriptor() const
-{
-    return descriptor_;
-}
-
-inline void FeaturePointImage::setDescriptor(const cv::Mat& _descriptor)
-{
-    descriptor_ = _descriptor;
-}
-
-inline bool FeaturePointImage::isKnown()
-{
-    return is_known_;
-}
-
-inline void FeaturePointImage::setIsKnown(bool _is_known)
-{
-    is_known_ = _is_known;
-}
-
-inline const cv::Rect& FeaturePointImage::getTrackerRoi() const
-{
-    return tracker_roi_;
-}
-
-inline void FeaturePointImage::setTrackerRoi(const cv::Rect& tracker_roi)
+inline void FeaturePointImage::setKeyPoint(const WKeyPoint& _kp)
 {
-    tracker_roi_ = tracker_roi;
+    kp_ = _kp;
+    measurement_(0) = _kp.getCvKeyPoint().pt.x;
+    measurement_(1) = _kp.getCvKeyPoint().pt.y;
 }
 
 } // namespace wolf
 
-#endif // FEATURE_IMAGE_H
+#endif // FEATURE_POINT_IMAGE_H
diff --git a/include/vision/landmark/landmark_ahp.h b/include/vision/landmark/landmark_ahp.h
index 14fd65b2befe4fd92653cd213d544d7a1586e250..a60dc0cb890456d1c00f9d7d94f45205aaa986ee 100644
--- a/include/vision/landmark/landmark_ahp.h
+++ b/include/vision/landmark/landmark_ahp.h
@@ -22,14 +22,15 @@
 #ifndef LANDMARK_AHP_H
 #define LANDMARK_AHP_H
 
-//Wolf includes
-#include "core/landmark/landmark_base.h"
-
 // yaml
 #include <yaml-cpp/yaml.h>
 
-// Vision utils
-#include <vision_utils/vision_utils.h>
+// OpenCV includes
+#include <opencv2/core.hpp>
+
+//Wolf includes
+#include "core/landmark/landmark_base.h"
+
 
 namespace wolf {
     
@@ -51,8 +52,10 @@ class LandmarkAhp : public LandmarkBase
         const cv::Mat& getCvDescriptor() const;
         void setCvDescriptor(const cv::Mat& _descriptor);
 
-        const FrameBasePtr  getAnchorFrame () const;
-        const SensorBasePtr getAnchorSensor() const;
+        FrameBaseConstPtr  getAnchorFrame () const;
+        FrameBasePtr  getAnchorFrame ();
+        SensorBaseConstPtr getAnchorSensor() const;
+        SensorBasePtr getAnchorSensor();
 
         void setAnchorFrame  (FrameBasePtr  _anchor_frame );
         void setAnchorSensor (SensorBasePtr _anchor_sensor);
@@ -79,7 +82,12 @@ inline void LandmarkAhp::setCvDescriptor(const cv::Mat& _descriptor)
     cv_descriptor_ = _descriptor;
 }
 
-inline const FrameBasePtr LandmarkAhp::getAnchorFrame() const
+inline FrameBaseConstPtr LandmarkAhp::getAnchorFrame() const
+{
+    return anchor_frame_;
+}
+
+inline FrameBasePtr LandmarkAhp::getAnchorFrame() 
 {
     return anchor_frame_;
 }
@@ -89,7 +97,12 @@ inline void LandmarkAhp::setAnchorFrame(FrameBasePtr _anchor_frame)
     anchor_frame_ = _anchor_frame;
 }
 
-inline const SensorBasePtr LandmarkAhp::getAnchorSensor() const
+inline SensorBaseConstPtr LandmarkAhp::getAnchorSensor() const
+{
+    return anchor_sensor_;
+}
+
+inline SensorBasePtr LandmarkAhp::getAnchorSensor()
 {
     return anchor_sensor_;
 }
diff --git a/include/vision/landmark/landmark_hp.h b/include/vision/landmark/landmark_hp.h
index af2b5c5aa030df2cefd7b4f025cd86782c54f3b0..9ab26790bef0c6a31d709c2439b52edd97a72503 100644
--- a/include/vision/landmark/landmark_hp.h
+++ b/include/vision/landmark/landmark_hp.h
@@ -28,9 +28,8 @@
 // yaml
 #include <yaml-cpp/yaml.h>
 
-// Vision utils
-#include <vision_utils/vision_utils.h>
-
+// OpenCV includes
+#include <opencv2/core.hpp>
 namespace wolf {
     
 WOLF_PTR_TYPEDEFS(LandmarkHp);
@@ -43,7 +42,7 @@ class LandmarkHp : public LandmarkBase
 
 
     public:
-        LandmarkHp(Eigen::Vector4d _position_homogeneous, SensorBasePtr _sensor_, cv::Mat _2d_descriptor);
+        LandmarkHp(Eigen::Vector4d _position_homogeneous, cv::Mat _2d_descriptor);
 
         ~LandmarkHp() override;
 
diff --git a/include/vision/landmark/landmark_point_3d.h b/include/vision/landmark/landmark_point_3d.h
index 045248fe74be3683edcbf179db4f1c604c82c3dd..1db9d6e7985f4d056a7f7ed17b657ebab945d455 100644
--- a/include/vision/landmark/landmark_point_3d.h
+++ b/include/vision/landmark/landmark_point_3d.h
@@ -22,11 +22,13 @@
 #ifndef LANDMARK_POINT_3d_H
 #define LANDMARK_POINT_3d_H
 
+
+// OpenCV includes
+#include <opencv2/core.hpp>
+
 //Wolf includes
 #include "core/landmark/landmark_base.h"
 
-// Vision Utils includes
-#include <vision_utils/vision_utils.h>
 
 namespace wolf {
 
diff --git a/include/vision/math/pinhole_tools.h b/include/vision/math/pinhole_tools.h
index 4d55810f014168f07ce1459c09b373324c5bd462..99d66a4e3341d5d353548adb39c9aca73d45bf79 100644
--- a/include/vision/math/pinhole_tools.h
+++ b/include/vision/math/pinhole_tools.h
@@ -272,8 +272,7 @@ Matrix<typename Derived2::Scalar, 2, 1> distortPoint(const MatrixBase<Derived1>
     EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived1);
     MatrixSizeCheck<2,1>::check(up);
 
-    SizeEigen n = d.size();
-    if (n == 0)
+    if (d.size() == 0)
         return up;
     else {
         typename Derived2::Scalar r2 = up(0) * up(0) + up(1) * up(1); // this is the norm squared: r2 = ||u||^2
@@ -429,12 +428,13 @@ Matrix<typename Derived2::Scalar, 2, 1> pixellizePoint(const MatrixBase<Derived1
     MatrixSizeCheck<4,1>::check(k);
     MatrixSizeCheck<2,1>::check(ud);
 
+    typedef typename Derived1::Scalar S;
     typedef typename Derived2::Scalar T;
 
-    T u_0 = k(0);
-    T v_0 = k(1);
-    T a_u = k(2);
-    T a_v = k(3);
+    const S& u_0 = k(0);
+    const S& v_0 = k(1);
+    const S& a_u = k(2);
+    const S& a_v = k(3);
     Matrix<T, 2, 1> u;
 
     u(0) = u_0 + a_u * ud(0);
diff --git a/include/vision/processor/active_search.h b/include/vision/processor/active_search.h
new file mode 100644
index 0000000000000000000000000000000000000000..dc22ec79f38ead6d0b6f359638244a67b64755bf
--- /dev/null
+++ b/include/vision/processor/active_search.h
@@ -0,0 +1,335 @@
+//--------LICENSE_START--------
+//
+// Copyright (C) 2020,2021,2022 Institut de Robòtica i Informàtica Industrial, CSIC-UPC.
+// Authors: Joan Solà Ortega (jsola@iri.upc.edu)
+// All rights reserved.
+//
+// This file is part of WOLF
+// WOLF is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with this program.  If not, see <http://www.gnu.org/licenses/>.
+//
+//--------LICENSE_END--------
+/**
+ * \file active_search.h
+ *
+ *  Active search detection and matching for points.
+ *
+ * \date 10/04/2016
+ * \author jsola, dinesh
+ */
+
+#ifndef ACTIVESEARCH_H_
+#define ACTIVESEARCH_H_
+
+// Wolf includes
+#include <core/common/wolf.h>
+
+//OpenCV includes
+#include <opencv2/core/core.hpp>
+#include <opencv2/features2d/features2d.hpp>
+
+namespace wolf{
+
+/**
+ * \brief Active search tesselation grid.
+ *
+ * \author jsola, dinesh
+ *
+ * This class implements a tesselation grid for achieving active search
+ * behavior in landmark initialization.
+ *
+ * The grid defines a set of cells in the image.
+ * The idea is to count the number of projected landmarks, or active tracks, per grid cell,
+ * and use one randomly chosen cell that is empty
+ * for feature detection and landmark or track initialization.
+ * This guarantees that the system will automatically populate all the
+ * regions of the image.
+ *
+ * The feature density can be controlled by adjusting the grid's number of cells.  Important notes:
+ * - Typically, use grids of 5x5 to 18x12 cells.
+ * - Try to make reasonably square cells.
+ * - The final cell sizes are always integers, even if the H and V number of cells are not exact divisors of the image size.
+ *
+ * This class implements a few interesting features:
+ * - The grid can be randomly re-positioned at each frame to avoid dead zones at the cell edges.
+ * - Only the inner cells are activated for feature detection to avoid reaching the image edges.
+ * - The region of interest (ROI) associated with a particular cell is shrunk with a parameterizable amount
+ *   to guarantee a minimum 'separation' between existing and new features.
+ * - The region of interest is ensured to lie at a distance from the image boundaries, defined by the parameter 'margin'.
+ *
+ * The blue and green grids in the figure below represent the grid
+ * at two different offsets, corresponding to two different frames.
+ *
+ *   \image html tesselationGrid.png "The tesselation grid used for active feature detection and initialization"
+ *
+ * This second figure shows a typical situation that we use to explain the basic mechanism.
+ *
+ *   \image html tesselationExample.png "A typical configuration of the tesselation grid"
+ *
+ * Observe the figure and use the following facts as an operation guide:
+ * - The grid is offset by a fraction of a cell size.
+ *     - use renew() at each frame to clear the grid and set a new offset.
+ * - Projected landmarks are represented by red dots.
+ *     - After projection, use hitCell() to add a new dot to the grid.
+ * - Cells with projected landmarks inside are 'occupied'.
+ * - Only the inner cells (thick blue rectangle) are considered for Region of Interest (ROI) extraction.
+ * - One cell is chosen randomly among those that are empty.
+ *     - Use pickRoi() to obtain an empty ROI for initialization.
+ * - The ROI is smaller than the cell to guarantee a minimum feature separation.
+ *     - Use the optional 'separation' parameter at construction time to control this separation.
+ * - A new feature is to be searched inside this ROI.
+ * - If there is no feature found in this ROI, call blockCell() function to avoid searching in this area again.
+ * - If you need to search more than one feature per frame, proceed like this:
+ *     - Call pickRoi().
+ *     - Try to detect a Feature in ROI.
+ *     - If successful detection
+ *         - add the detected pixel with hitCell().
+ *     - Else
+ *         - block the cell with blockCell().
+ *     - Repeat these steps for each feature to be searched.
+ *
+ * We include here a schematic active-search pseudo-code algorithm to illustrate its operation:
+ * \code
+ * // Init necessary objects
+ * ActiveSearch activeSearch;
+ * ActiveSearchGrid grid(640, 480, 4, 4, 10); // Construction with 10 pixels separation.
+ *
+ * // We start projecting everybody
+ * for (obs = begin(); obs != end(); obs++)   // loop observations
+ * {
+ *   obs->project();
+ *   if (obs->isVisible())
+ *     grid.hitCell(obs->pixel());   // add only visible landmarks
+ * }
+ *
+ * // Then we process the selected observations
+ * activeSearch.selectObs();                  // select interesting features
+ * for (activeSearch.selectedObs);            // loop selected obs
+ *   obs.process();                           // process observation
+ *
+ * // Now we go to initialization
+ * num_new_detections = 0;
+ * while(num_new_detections < max_detections)
+ *   grid.pickRoi(roi);                         // roi is now region of interest
+ *   if (detectFeature(roi))                    // detect inside ROI
+ *     initLandmark();                          // initialize only if successful detection
+ *     num_new_detections++;
+ *   else
+ *     blockCell(roi)
+ *
+ * \endcode
+ *
+ */
+class ActiveSearchGrid {
+
+    private:
+        Eigen::Vector2i img_size_;
+        Eigen::Vector2i grid_size_;
+        Eigen::Vector2i cell_size_;
+        Eigen::Vector2i offset_;
+        Eigen::Vector2i roi_coordinates_;
+        Eigen::MatrixXi projections_count_;
+        Eigen::MatrixXi empty_cells_tile_tmp_;
+        int separation_;
+        int margin_;
+
+    public:
+        EIGEN_MAKE_ALIGNED_OPERATOR_NEW; // to guarantee alignment (see http://eigen.tuxfamily.org/dox-devel/group__TopicStructHavingEigenMembers.html)
+
+        /**
+         * \brief Void constructor
+         *
+         * Calling this constructor requires the use of setup() to configure.
+         */
+        ActiveSearchGrid();
+
+        /**
+         * \brief Constructor.
+         * \param _img_size_h horizontal image size, in pixels.
+         * \param _img_size_v vertical image size.
+         * \param _n_cells_h horizontal number of cells per image width.
+         * \param _n_cells_v vertical number of cells per image height.
+         * \param _margin minimum separation to the edge of the image
+         * \param _separation minimum separation between existing and new points.
+         */
+        ActiveSearchGrid(const int & _img_size_h, const int & _img_size_v,
+                         const int & _n_cells_h,  const int & _n_cells_v,
+                         const int & _margin = 0,
+                         const int & _separation = 0);
+
+        /**
+         * \brief Function to set the parameters of the active search grid
+         * \param _img_size_h horizontal image size, in pixels.
+         * \param _img_size_v vertical image size.
+         * \param _n_cells_h horizontal number of cells per image width.
+         * \param _n_cells_v vertical number of cells per image height.
+         * \param _margin minimum separation to the edge of the image
+         * \param _separation minimum separation between existing and new points.
+         */
+        void setup(const int & _img_size_h, const int & _img_size_v,
+                   const int & _n_cells_h,  const int & _n_cells_v,
+                   const int & _margin = 0,
+                   const int & _separation = 0);
+
+        /**
+         * \brief Re-set the image size
+         * \param _img_size_h horizontal image size, in pixels.
+         * \param _img_size_v vertical image size.
+         */
+        void resizeImage(unsigned int _img_size_h, unsigned int _img_size_v);
+
+        /** \brief Clear grid.
+         *
+         * Sets all cell counters to zero.
+         */
+        void clear();
+
+        /**
+         * \brief Clear grid and position it at a new random location.
+         *
+         * Sets all cell counters to zero and sets a new random grid position.
+         */
+        void renew();
+
+        /**
+         * \brief Add a projected pixel to the grid.
+         * If the cell is blocked, unblock and add.
+         * \param _x the x-coordinate of the pixel to add.
+         * \param _y the y-coordinate of the pixel to add.
+         */
+        template<typename Scalar>
+        void hitCell(const Scalar _x, const Scalar _y);
+
+        /**
+         * \brief Add a projected pixel to the grid.
+         * If the cell is blocked, unblock and add.
+         * \param _pix the pixel to add as an Eigen 2-vector with any Scalar type (can be a non-integer).
+         */
+        template<typename Scalar>
+        void hitCell(const Eigen::Matrix<Scalar, 2, 1>& _pix);
+
+        /**
+         * \brief Add a projected pixel to the grid.
+         * If the cell is blocked, unblock and add.
+         * \param _pix the pixel to add as a cv::KeyPoint.
+         */
+        void hitCell(const cv::KeyPoint& _pix);
+
+        /**
+         * \brief Get ROI of a random empty cell.
+         * \param _roi the resulting ROI
+         * \return true if ROI exists.
+         */
+        bool pickRoi(cv::Rect & _roi);
+
+        /**
+         * \brief Block a cell known to be empty in order to avoid searching again in it.
+         * \param _cell the cell where nothing was found
+         */
+        void blockCell(const Eigen::Vector2i & _cell);
+
+        /**
+         * \brief Call this after pickRoi if no point was found in the roi
+         * in order to avoid searching again in it.
+         * \param _roi the ROI where nothing was found
+         */
+        void blockCell(const cv::Rect & _roi);
+
+        /**
+         * \brief Get the region of interest, reduced by a margin.
+         */
+        void cell2roi(const Eigen::Vector2i & _cell, cv::Rect& _roi);
+
+    private:
+        /**
+         * \brief Get cell corresponding to pixel
+         */
+        template<typename Scalar>
+        Eigen::Vector2i coords2cell(const Scalar _x, const Scalar _y);
+
+        /**
+         * \brief Get cell origin (exact pixel)
+         */
+        Eigen::Vector2i cellOrigin(const Eigen::Vector2i & _cell);
+
+        /**
+         * \brief Get cell center (can be decimal if size of cell is an odd number of pixels)
+         */
+        Eigen::Vector2i cellCenter(const Eigen::Vector2i& _cell);
+
+        /**
+         * \brief Get one random empty cell
+         */
+        bool pickEmptyCell(Eigen::Vector2i & _cell);
+
+        /**
+         * \brief True if the cell is blocked
+         * \param _cell the queried cell
+         */
+        bool isCellBlocked(const Eigen::Vector2i& _cell);
+
+};
+
+inline void ActiveSearchGrid::clear()
+{
+    projections_count_.setZero();
+}
+
+inline void ActiveSearchGrid::renew()
+{
+    offset_(0) = -(margin_ + rand() % (cell_size_(0) - 2 * margin_)); // from -margin to -(cellSize(0)-margin)
+    offset_(1) = -(margin_ + rand() % (cell_size_(1) - 2 * margin_)); // from -margin to -(cellSize(1)-margin)
+    clear();
+}
+
+inline void ActiveSearchGrid::hitCell(const cv::KeyPoint& _pix)
+{
+    hitCell(_pix.pt.x, _pix.pt.y);
+}
+
+template<typename Scalar>
+inline void ActiveSearchGrid::hitCell(const Eigen::Matrix<Scalar, 2, 1>& _pix)
+{
+    hitCell(_pix(0), _pix(1));
+}
+
+template<typename Scalar>
+inline void ActiveSearchGrid::hitCell(const Scalar _x, const Scalar _y)
+{
+    Eigen::Vector2i cell = coords2cell(_x, _y);
+    if (cell(0) < 0 || cell(1) < 0 || cell(0) >= grid_size_(0) || cell(1) >= grid_size_(1))
+        return;
+
+    if (isCellBlocked(cell))
+        projections_count_(cell(0), cell(1)) = 0; // unblock cell: it becomes empty
+
+    projections_count_(cell(0), cell(1))++;
+}
+
+template<typename Scalar>
+inline Eigen::Vector2i ActiveSearchGrid::coords2cell(const Scalar _x, const Scalar _y)
+{
+    Eigen::Vector2i cell;
+    cell(0) = (_x - offset_(0)) / cell_size_(0);
+    cell(1) = (_y - offset_(1)) / cell_size_(1);
+    return cell;
+}
+
+inline Eigen::Vector2i ActiveSearchGrid::cellCenter(const Eigen::Vector2i& _cell)
+{
+    return cellOrigin(_cell) + cell_size_ / 2; // beware these are all integers, so the result is truncated to the smaller integer
+}
+
+}
+
+#endif /* ACTIVESEARCH_H_ */
diff --git a/include/vision/processor/processor_bundle_adjustment.h b/include/vision/processor/processor_bundle_adjustment.h
deleted file mode 100644
index f19001ae44678be64ae2d594681bd72f2521654d..0000000000000000000000000000000000000000
--- a/include/vision/processor/processor_bundle_adjustment.h
+++ /dev/null
@@ -1,256 +0,0 @@
-//--------LICENSE_START--------
-//
-// Copyright (C) 2020,2021,2022 Institut de Robòtica i Informàtica Industrial, CSIC-UPC.
-// Authors: Joan Solà Ortega (jsola@iri.upc.edu)
-// All rights reserved.
-//
-// This file is part of WOLF
-// WOLF is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with this program.  If not, see <http://www.gnu.org/licenses/>.
-//
-//--------LICENSE_END--------
-/*
- * processor_bundle_adjustment.h
- *
- *  Created on: May 3, 2019
- *      Author: ovendrell
- */
-
-#ifndef INCLUDE_BASE_PROCESSOR_PROCESSOR_BUNDLE_ADJUSTMENT_H_
-#define INCLUDE_BASE_PROCESSOR_PROCESSOR_BUNDLE_ADJUSTMENT_H_
-
-//wolf includes
-#include "core/processor/processor_tracker_feature.h"
-#include "vision/capture/capture_image.h"
-#include "vision/landmark/landmark_hp.h"
-#include "vision/math/pinhole_tools.h"
-#include "vision/sensor/sensor_camera.h"
-#include "core/math/rotations.h"
-
-//vision utils includes
-#include "vision_utils/vision_utils.h"
-#include "vision_utils/detectors.h"
-#include "vision_utils/descriptors.h"
-#include "vision_utils/matchers.h"
-#include "../factor/factor_pixel_hp.h"
-
-namespace wolf{
-
-WOLF_STRUCT_PTR_TYPEDEFS(ParamsProcessorBundleAdjustment);
-
-struct ParamsProcessorBundleAdjustment : public ParamsProcessorTrackerFeature
-{
-	std::string yaml_file_params_vision_utils;
-
-	bool delete_ambiguities;
-
-	int n_cells_h;
-	int n_cells_v;
-	int min_response_new_feature;
-
-	double pixel_noise_std; ///< std noise of the pixel
-	int min_track_length_for_factor; ///< Minimum track length of a matched feature to create a factor
-
-	ParamsProcessorBundleAdjustment() = default;
-
-};
-
-WOLF_PTR_TYPEDEFS(ProcessorBundleAdjustment);
-
-class ProcessorBundleAdjustment : public ProcessorTrackerFeature
-{
-    protected:
-        vision_utils::DetectorBasePtr det_ptr_;
-        vision_utils::DescriptorBasePtr des_ptr_;
-        vision_utils::MatcherBasePtr mat_ptr_;
-
-        ParamsProcessorBundleAdjustmentPtr params_bundle_adjustment_;  // Configuration parameters
-
-        CaptureImagePtr capture_image_last_;
-        CaptureImagePtr capture_image_incoming_;
-
-        SensorCameraPtr camera;
-
-        Matrix2d        pixel_cov_;
-
-        cv::Mat tvec_;
-        cv::Mat rvec_;
-
-
-        //TODO: correct to add this?
-        std::map<size_t, LandmarkBasePtr> lmk_track_map_; //LandmarkTrackMap;
-
-	public:
-		vision_utils::DetectorBasePtr   get_det_ptr_() {return det_ptr_;};
-        vision_utils::DescriptorBasePtr get_des_ptr_() {return des_ptr_;};
-        vision_utils::MatcherBasePtr    get_mat_ptr_() {return mat_ptr_;};
-
-    private:
-        int frame_count_;
-
-
-    public:
-        /** \brief Class constructor
-        */
-        ProcessorBundleAdjustment(ParamsProcessorBundleAdjustmentPtr _params_bundle_adjustment);
-        /** \brief Class destructor
-        */
-        ~ProcessorBundleAdjustment() override
-        {
-            //
-        }
-
-    public:
-
-        void configure(SensorBasePtr _sensor) override;
-
-        /** Pre-process incoming Capture
-         *
-         * This is called by process() just after assigning incoming_ptr_ to a valid Capture.
-         *
-         * Overload this function to prepare stuff on derived classes.
-         *
-         * Typical uses of prePrecess() are:
-         *   - casting base types to derived types
-         *   - initializing counters, flags, or any derived variables
-         *   - initializing algorithms needed for processing the derived data
-         */
-        void preProcess() override;
-
-        /** Post-process
-         *
-         * This is called by process() after finishing the processing algorithm.
-         *
-         * Overload this function to post-process stuff on derived classes.
-         *
-         * Typical uses of postPrecess() are:
-         *   - resetting and/or clearing variables and/or algorithms at the end of processing
-         *   - drawing / printing / logging the results of the processing
-         */
-        void postProcess() override;
-
-        /** \brief Track provided features in \b _capture
-         * \param _features_in input list of features in \b last to track
-         * \param _capture the capture in which the _features_in should be searched
-         * \param _features_out returned list of features found in \b _capture
-         * \param _feature_correspondences returned map of correspondences: _feature_correspondences[feature_out_ptr] = feature_in_ptr
-         *
-         * \return the number of features tracked
-         */
-        unsigned int trackFeatures(const FeatureBasePtrList& _features_in,
-                                           const CaptureBasePtr& _capture,
-                                           FeatureBasePtrList& _features_out,
-                                           FeatureMatchMap& _feature_correspondences) override;
-
-        /** \brief Correct the drift in incoming feature by re-comparing against the corresponding feature in origin.
-         * \param _origin_feature input feature in origin capture tracked
-         * \param _incoming_feature input/output feature in incoming capture to be corrected
-         * \return false if the the process discards the correspondence with origin's feature
-         */
-        bool correctFeatureDrift(const FeatureBasePtr _origin_feature, const FeatureBasePtr _last_feature, FeatureBasePtr _incoming_feature) override;
-
-        /** \brief Vote for KeyFrame generation
-         *
-         * If a KeyFrame criterion is validated, this function returns true,
-         * meaning that it wants to create a KeyFrame at the \b last Capture.
-         *
-         * WARNING! This function only votes! It does not create KeyFrames!
-         */
-        bool voteForKeyFrame() const override;
-
-        bool isInlier(const cv::KeyPoint& _kp_incoming, const cv::KeyPoint& _kp_last) const;
-
-        bool is_tracked(int& kp_idx_) const;
-
-        /** \brief Detect new Features
-         * \param _max_features maximum number of features detected (-1: unlimited. 0: none)
-         * \param _capture The capture in which the new features should be detected.
-         * \param _features_out The list of detected Features in _capture.
-         * \return The number of detected Features.
-         *
-         * This function detects Features that do not correspond to known Features/Landmarks in the system.
-         *
-         * IMPORTANT: The features in _features_out should be emplaced. Don't use `make_shared`, use `FeatureBase::emplace` instead.
-         * Then, they will be already linked to the _capture.
-         * If you detect all the features at once in preprocess(), you should either emplace them (`FeatureBase::emplace()`) and remove the not returned features in _features_out (`FeatureBase::remove()`),
-         * or create them (`make_shared()`) and link all the returned features in _features_out (`FeatureBase::link(_capture)`).
-         *
-         * The function is called in ProcessorTrackerFeature::processNew() to set the member new_features_last_,
-         * the list of newly detected features of the capture last_ptr_.
-         */
-        unsigned int detectNewFeatures(const int& _max_new_features,
-                                               const CaptureBasePtr& _capture,
-                                               FeatureBasePtrList& _features_out) override;
-
-        /** \brief Emplaces a new factor
-         * \param _feature_ptr pointer to the parent Feature
-         * \param _feature_other_ptr pointer to the other feature constrained.
-         *
-         * Implement this method in derived classes.
-         *
-         * This function emplaces a factor of the appropriate type for the derived processor.
-         */
-        FactorBasePtr emplaceFactor(FeatureBasePtr _feature_ptr, FeatureBasePtr _feature_other_ptr) override;
-
-        virtual LandmarkBasePtr emplaceLandmark(FeatureBasePtr _feature_ptr);
-
-        /** \brief Establish factors between features in Captures \b last and \b origin
-         */
-        void establishFactors() override;
-
-        void setParams(const ParamsProcessorBundleAdjustmentPtr _params);
-
-    public:
-        /// @brief Factory method
-        static ProcessorBasePtr create(const std::string& _unique_name,
-                                       const ParamsProcessorBasePtr _params);
-
-    private:
-
-        cv::Mat image_debug_;
-
-    public:
-
-        /**
-         * \brief Return Image for debug purposes
-         */
-        cv::Mat getImageDebug() const;
-
-        /**
-         * \brief Return list of Features tracked in a Capture
-         */
-        std::list<FeatureBasePtr> trackedFeatures(const CaptureBasePtr& _capture_ptr) const;
-        /**
-        * \brief Return list of Landmarks
-        */
-        std::list<LandmarkBasePtr> currentLandmarks() const;
-};
-
-inline cv::Mat ProcessorBundleAdjustment::getImageDebug() const
-{
-    return image_debug_;
-}
-
-inline std::list<FeatureBasePtr> ProcessorBundleAdjustment::trackedFeatures(const CaptureBasePtr& _capture_ptr) const
-{
-	return track_matrix_.snapshotAsList(_capture_ptr);
-}
-
-inline std::list<LandmarkBasePtr> ProcessorBundleAdjustment::currentLandmarks() const
-{
-	return getProblem()->getMap()->getLandmarkList();
-}
-
-} //namespace wolf
-
-#endif /* INCLUDE_BASE_PROCESSOR_PROCESSOR_BUNDLE_ADJUSTMENT_H_ */
diff --git a/include/vision/processor/processor_params_image.h b/include/vision/processor/processor_params_image.h
deleted file mode 100644
index b803d31ec261ad0fc85a1c0aa488aa3f3768b06a..0000000000000000000000000000000000000000
--- a/include/vision/processor/processor_params_image.h
+++ /dev/null
@@ -1,100 +0,0 @@
-//--------LICENSE_START--------
-//
-// Copyright (C) 2020,2021,2022 Institut de Robòtica i Informàtica Industrial, CSIC-UPC.
-// Authors: Joan Solà Ortega (jsola@iri.upc.edu)
-// All rights reserved.
-//
-// This file is part of WOLF
-// WOLF is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with this program.  If not, see <http://www.gnu.org/licenses/>.
-//
-//--------LICENSE_END--------
-#ifndef PROCESSOR_IMAGE_PARAMS_H
-#define PROCESSOR_IMAGE_PARAMS_H
-
-// wolf
-#include "core/processor/processor_tracker_feature.h"
-#include "core/processor/processor_tracker_landmark.h"
-#include "core/utils/params_server.h"
-
-namespace wolf
-{
-
-WOLF_STRUCT_PTR_TYPEDEFS(ParamsProcessorTrackerFeatureImage);
-
-struct ParamsProcessorTrackerFeatureImage : public ParamsProcessorTrackerFeature
-{
-        std::string yaml_file_params_vision_utils;
-
-        double min_response_for_new_features; ///< minimum value of the response to create a new feature
-        double distance;
-
-        double pixel_noise_std; ///< std noise of the pixel
-        double pixel_noise_var; ///< var noise of the pixel
-    ParamsProcessorTrackerFeatureImage() = default;
-    ParamsProcessorTrackerFeatureImage(std::string _unique_name, const ParamsServer& _server):
-        ParamsProcessorTrackerFeature(_unique_name, _server)
-    {
-        yaml_file_params_vision_utils   = _server.getParam<std::string>(_unique_name    + "/yaml_file_params_vision_utils");
-        min_response_for_new_features   = _server.getParam<double>(_unique_name         + "/min_response_for_new_features");
-        distance                        = _server.getParam<double>(_unique_name         + "/distance");
-        pixel_noise_std                 = _server.getParam<double>(_unique_name         + "/pixel_noise_std");
-        pixel_noise_var                 = _server.getParam<double>(_unique_name         + "/pixel_noise_var");
-    }
-    std::string print() const override
-    {
-        return "\n" + ParamsProcessorTrackerFeature::print()                                    + "\n"
-            + "yaml_file_params_vision_utils: " + yaml_file_params_vision_utils                 + "\n"
-            + "min_response_for_new_features: " + std::to_string(min_response_for_new_features) + "\n"
-            + "distance: "                      + std::to_string(distance)                      + "\n"
-            + "pixel_noise_std: "               + std::to_string(pixel_noise_std)               + "\n"
-            + "pixel_noise_var: "               + std::to_string(pixel_noise_var)               + "\n";
-    }
-};
-
-WOLF_STRUCT_PTR_TYPEDEFS(ParamsProcessorTrackerLandmarkImage);
-
-struct ParamsProcessorTrackerLandmarkImage : public ParamsProcessorTrackerLandmark
-{
-        std::string yaml_file_params_vision_utils;
-
-        double min_response_for_new_features; ///< minimum value of the response to create a new feature
-        double distance;
-
-        double pixel_noise_std; ///< std noise of the pixel
-        double pixel_noise_var; ///< var noise of the pixel
-
-        ParamsProcessorTrackerLandmarkImage(std::string _unique_name, const ParamsServer& _server):
-            ParamsProcessorTrackerLandmark(_unique_name, _server)
-        {
-            yaml_file_params_vision_utils   = _server.getParam<std::string>(_unique_name    + "/yaml_file_params_vision_utils");
-
-            min_response_for_new_features   = _server.getParam<double>(_unique_name         + "/min_response_for_new_features");
-            distance                        = _server.getParam<double>(_unique_name         + "/distance");
-
-            pixel_noise_std                 = _server.getParam<double>(_unique_name         + "/pixel_noise_std");
-            pixel_noise_var                 = _server.getParam<double>(_unique_name         + "/pixel_noise_var");
-        }
-        std::string print() const override
-        {
-            return "\n" + ParamsProcessorTrackerLandmark::print()                                       + "\n"
-                    + "yaml_file_params_vision_utils: " + yaml_file_params_vision_utils                 + "\n"
-                    + "min_response_for_new_features: " + std::to_string(min_response_for_new_features) + "\n"
-                    + "distance: "                      + std::to_string(distance)                      + "\n"
-                    + "pixel_noise_std: "               + std::to_string(pixel_noise_std)               + "\n"
-                    + "pixel_noise_var: "               + std::to_string(pixel_noise_var)               + "\n";
-        }
-};
-}
-
-#endif // PROCESSOR_IMAGE_PARAMS_H
diff --git a/include/vision/processor/processor_tracker_feature_image.h b/include/vision/processor/processor_tracker_feature_image.h
deleted file mode 100644
index f85e79e3ccb820e4eaa0b6a98843d2a9a1622a4a..0000000000000000000000000000000000000000
--- a/include/vision/processor/processor_tracker_feature_image.h
+++ /dev/null
@@ -1,215 +0,0 @@
-//--------LICENSE_START--------
-//
-// Copyright (C) 2020,2021,2022 Institut de Robòtica i Informàtica Industrial, CSIC-UPC.
-// Authors: Joan Solà Ortega (jsola@iri.upc.edu)
-// All rights reserved.
-//
-// This file is part of WOLF
-// WOLF is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with this program.  If not, see <http://www.gnu.org/licenses/>.
-//
-//--------LICENSE_END--------
-#ifndef PROCESSOR_TRACKER_FEATURE_IMAGE_H
-#define PROCESSOR_TRACKER_FEATURE_IMAGE_H
-
-#include "vision/sensor/sensor_camera.h"
-#include "vision/capture/capture_image.h"
-#include "vision/feature/feature_point_image.h"
-#include "vision/processor/processor_params_image.h"
-#include "vision/processor/processor_params_image.h"
-#include "vision/factor/factor_epipolar.h"
-
-// Wolf includes
-#include <core/state_block/state_block.h>
-#include <core/state_block/state_quaternion.h>
-#include <core/processor/processor_tracker_feature.h>
-
-// vision_utils
-#include <vision_utils/detectors/detector_base.h>
-#include <vision_utils/descriptors/descriptor_base.h>
-#include <vision_utils/matchers/matcher_base.h>
-#include <vision_utils/algorithms/activesearch/alg_activesearch.h>
-
-// General includes
-#include <cmath>
-#include <complex>      // std::complex, std::norm
-
-namespace wolf
-{
-
-WOLF_PTR_TYPEDEFS(ProcessorTrackerFeatureImage);
-
-//class
-class ProcessorTrackerFeatureImage : public ProcessorTrackerFeature
-{
-        // vision utils params
-    protected:
-
-        vision_utils::DetectorBasePtr det_ptr_;
-        vision_utils::DescriptorBasePtr des_ptr_;
-        vision_utils::MatcherBasePtr mat_ptr_;
-        vision_utils::AlgorithmACTIVESEARCHPtr active_search_ptr_;  // Active Search
-
-        int cell_width_; ///< Active search cell width
-        int cell_height_; ///< Active search cell height
-        vision_utils::AlgorithmParamsACTIVESEARCHPtr params_tracker_feature_image_activesearch_ptr_; ///< Active search parameters
-
-    protected:
-
-        ParamsProcessorTrackerFeatureImagePtr params_tracker_feature_image_;           ///< Struct with parameters of the processors
-        cv::Mat image_last_, image_incoming_;   ///< Images of the "last" and "incoming" Captures
-
-        struct
-        {
-                unsigned int width_; ///< width of the image
-                unsigned int height_; ///< height of the image
-        } image_;
-
-    public:
-
-        // Lists to store values to debug
-        std::list<cv::Rect> tracker_roi_;
-        std::list<cv::Rect> tracker_roi_inflated_;
-        std::list<cv::Rect> detector_roi_;
-        std::list<cv::Point> tracker_target_;
-
-        ProcessorTrackerFeatureImage(ParamsProcessorTrackerFeatureImagePtr _params_image);
-        ~ProcessorTrackerFeatureImage() override;
-
-        void configure(SensorBasePtr _sensor) override ;
-
-    protected:
-
-        /**
-         * \brief Does cast of the images and renews the active grid.
-         */
-        void preProcess() override;
-
-        /**
-         * \brief Does the drawing of the features.
-         *
-         * Used for debugging
-         */
-        void postProcess() override;
-
-        void advanceDerived() override
-        {
-            ProcessorTrackerFeature::advanceDerived();
-            image_last_ = image_incoming_;
-        }
-
-        void resetDerived() override
-        {
-            ProcessorTrackerFeature::resetDerived();
-            image_last_ = image_incoming_;
-        }
-
-        /** \brief Track provided features in \b _capture
-         * \param _features_in input list of features in \b last to track
-         * \param _capture the capture in which the _features_in should be searched
-         * \param _features_out returned list of features found in \b _capture
-         * \param _feature_correspondences returned map of correspondences: _feature_correspondences[feature_out_ptr] = feature_in_ptr
-         *
-         * \return the number of features tracked
-         */
-        unsigned int trackFeatures(const FeatureBasePtrList& _features_in,
-                                           const CaptureBasePtr& _capture,
-                                           FeatureBasePtrList& _features_out,
-                                           FeatureMatchMap& _feature_correspondences) override;
-
-        /** \brief Correct the drift in incoming feature by re-comparing against the corresponding feature in origin.
-         * \param _last_feature input feature in last capture tracked
-         * \param _incoming_feature input/output feature in incoming capture to be corrected
-         * \return false if the the process discards the correspondence with origin's feature
-         */
-        bool correctFeatureDrift(const FeatureBasePtr _origin_feature,
-                                         const FeatureBasePtr _last_feature,
-                                         FeatureBasePtr _incoming_feature) override;
-
-        /** \brief Vote for KeyFrame generation
-         *
-         * If a KeyFrame criterion is validated, this function returns true,
-         * meaning that it wants to create a KeyFrame at the \b last Capture.
-         *
-         * WARNING! This function only votes! It does not create KeyFrames!
-         */
-        bool voteForKeyFrame() const override;
-
-        /** \brief Detect new Features
-         * \param _max_features maximum number of features detected (-1: unlimited. 0: none)
-         * \param _capture The capture in which the new features should be detected.
-         * \param _features_out The list of detected Features in _capture.
-         * \return The number of detected Features.
-         *
-         * This function detects Features that do not correspond to known Features/Landmarks in the system.
-         *
-         * IMPORTANT: The features in _features_out should be emplaced. Don't use `make_shared`, use `FeatureBase::emplace` instead.
-         * Then, they will be already linked to the _capture.
-         * If you detect all the features at once in preprocess(), you should either emplace them (`FeatureBase::emplace()`) and remove the not returned features in _features_out (`FeatureBase::remove()`),
-         * or create them (`make_shared()`) and link all the returned features in _features_out (`FeatureBase::link(_capture)`).
-         *
-         * The function is called in ProcessorTrackerFeature::processNew() to set the member new_features_last_,
-         * the list of newly detected features of the capture last_ptr_.
-         */
-        unsigned int detectNewFeatures(const int& _max_new_features,
-                                               const CaptureBasePtr& _capture,
-                                               FeatureBasePtrList& _features_out) override;
-
-        /** \brief Emplaces a new factor
-         * \param _feature_ptr pointer to the parent Feature
-         * \param _feature_other_ptr pointer to the other feature constrained.
-         *
-         * Implement this method in derived classes.
-         *
-         * This function emplaces a factor of the appropriate type for the derived processor.
-         */
-        FactorBasePtr emplaceFactor(FeatureBasePtr _feature_ptr, FeatureBasePtr _feature_other_ptr) override;
-
-    private:
-
-        /**
-         * \brief Detects keypoints its descriptors in a specific roi of the image
-         * \param _image input image in which the algorithm will search
-         * \param _roi input roi used to define the area of search within the image
-         * \param _new_keypoints output keypoints obtained in the function
-         * \param new_descriptors output descriptors obtained in the function
-         * \return the number of detected features
-         */
-        virtual unsigned int detect(cv::Mat _image, cv::Rect& _roi, std::vector<cv::KeyPoint>& _new_keypoints,
-                                    cv::Mat& _new_descriptors);
-
-        /**
-         * \brief Does the match between a target descriptor and (potentially) multiple candidate descriptors of a Feature.
-         * \param _target_descriptor descriptor of the target
-         * \param _candidate_descriptors descriptors of the candidates
-         * \param _cv_matches output variable in which the best result will be stored (in the position [0])
-         * \return normalized score of similarity (1 - exact match; 0 - complete mismatch)
-         */
-        virtual double match(cv::Mat _target_descriptor, cv::Mat _candidate_descriptors,
-                             std::vector<cv::DMatch>& _cv_matches);
-
-        // These only to debug, will disappear one day
-    public:
-        virtual void drawFeatures(cv::Mat _image);
-        virtual void drawTarget(cv::Mat _image, std::list<cv::Point> _target_list);
-        virtual void drawRoi(cv::Mat _image, std::list<cv::Rect> _roi_list, cv::Scalar _color);
-        virtual void resetVisualizationFlag(const FeatureBasePtrList& _feature_list_last);
-
-    public:
-        static ProcessorBasePtr create(const std::string& _unique_name, const ParamsProcessorBasePtr _params);
-
-};
-
-} // namespace wolf
-
-#endif // PROCESSOR_TRACKER_FEATURE_IMAGE_H
diff --git a/include/vision/processor/processor_tracker_feature_trifocal.h b/include/vision/processor/processor_tracker_feature_trifocal.h
deleted file mode 100644
index 17b6fb8373e557f90482676a60493d0ce472fd00..0000000000000000000000000000000000000000
--- a/include/vision/processor/processor_tracker_feature_trifocal.h
+++ /dev/null
@@ -1,232 +0,0 @@
-//--------LICENSE_START--------
-//
-// Copyright (C) 2020,2021,2022 Institut de Robòtica i Informàtica Industrial, CSIC-UPC.
-// Authors: Joan Solà Ortega (jsola@iri.upc.edu)
-// All rights reserved.
-//
-// This file is part of WOLF
-// WOLF is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with this program.  If not, see <http://www.gnu.org/licenses/>.
-//
-//--------LICENSE_END--------
-#ifndef _PROCESSOR_TRACKER_FEATURE_TRIFOCAL_H_
-#define _PROCESSOR_TRACKER_FEATURE_TRIFOCAL_H_
-
-//Wolf includes
-#include "core/processor/processor_tracker_feature.h"
-#include "vision/capture/capture_image.h"
-#include "core/utils/params_server.h"
-
-// Vision utils
-#include <vision_utils/vision_utils.h>
-#include <vision_utils/detectors/detector_base.h>
-#include <vision_utils/descriptors/descriptor_base.h>
-#include <vision_utils/matchers/matcher_base.h>
-#include <vision_utils/algorithms/activesearch/alg_activesearch.h>
-
-namespace wolf
-{
-
-WOLF_STRUCT_PTR_TYPEDEFS(ParamsProcessorTrackerFeatureTrifocal);
-
-struct ParamsProcessorTrackerFeatureTrifocal : public ParamsProcessorTrackerFeature
-{
-    std::string yaml_file_params_vision_utils;
-
-    int n_cells_h;
-    int n_cells_v;
-    int min_response_new_feature;
-    double pixel_noise_std; ///< std noise of the pixel
-    int min_track_length_for_factor; ///< Minimum track length of a matched feature to create a factor
-    bool debug_view;
-
-    ParamsProcessorTrackerFeatureTrifocal() = default;
-    ParamsProcessorTrackerFeatureTrifocal(std::string _unique_name, const ParamsServer& _server):
-        ParamsProcessorTrackerFeature(_unique_name, _server)
-    {
-        yaml_file_params_vision_utils   = _server.getParam<std::string>(_unique_name    + "/yaml_file_params_vision_utils");
-        n_cells_h                       = _server.getParam<int>(_unique_name            + "/n_cells_h");
-        n_cells_v                       = _server.getParam<int>(_unique_name            + "/n_cells_v");
-        min_response_new_feature        = _server.getParam<int>(_unique_name            + "/min_response_new_feature");
-        pixel_noise_std                 = _server.getParam<double>(_unique_name         + "/pixel_noise_std");
-        min_track_length_for_factor     = _server.getParam<int>(_unique_name            + "/min_track_length_for_factor");
-        debug_view                      = _server.getParam<bool>(_unique_name           + "/debug_view");
-    }
-    std::string print() const override
-    {
-        return "\n" + ParamsProcessorTrackerFeature::print()                                    + "\n"
-            + "yaml_file_params_vision_utils: " + yaml_file_params_vision_utils                 + "\n"
-            + "n_cells_h: "                     + std::to_string(n_cells_h)                     + "\n"
-            + "n_cells_v: "                     + std::to_string(n_cells_v)                     + "\n"
-            + "min_response_new_feature: "      + std::to_string(min_response_new_feature)      + "\n"
-            + "pixel_noise_std: "               + std::to_string(pixel_noise_std)               + "\n"
-            + "min_track_length_for_factor: "   + std::to_string(min_track_length_for_factor)   + "\n"
-            + "debug_view: "                    + std::to_string(debug_view)                    + "\n";
-    }
-};
-
-WOLF_PTR_TYPEDEFS(ProcessorTrackerFeatureTrifocal);
-
-class ProcessorTrackerFeatureTrifocal : public ProcessorTrackerFeature
-{
-        // Parameters for vision_utils
-    protected:
-        vision_utils::DetectorBasePtr   det_ptr_;
-        vision_utils::DescriptorBasePtr des_ptr_;
-        vision_utils::MatcherBasePtr    mat_ptr_;
-
-    protected:
-
-        ParamsProcessorTrackerFeatureTrifocalPtr params_tracker_feature_trifocal_;      ///< Configuration parameters
-
-        CaptureImagePtr capture_image_last_;
-        CaptureImagePtr capture_image_incoming_;
-        Matrix2d        pixel_cov_;
-
-    private:
-        CaptureBasePtr prev_origin_ptr_;                    ///< Capture previous to origin_ptr_ for the third focus of the trifocal.
-        bool initialized_;                                  ///< Flags the situation where three focus are available: prev_origin, origin, and last.
-
-    public:
-
-        /** \brief Class constructor
-         */
-        ProcessorTrackerFeatureTrifocal( ParamsProcessorTrackerFeatureTrifocalPtr _params_tracker_feature_trifocal );
-
-        /** \brief Class Destructor
-         */
-        ~ProcessorTrackerFeatureTrifocal() override;
-        void configure(SensorBasePtr _sensor) override;
-
-        /** \brief Track provided features in \b _capture
-         * \param _features_in input list of features in \b last to track
-         * \param _capture the capture in which the _features_in should be searched
-         * \param _features_out returned list of features found in \b _capture
-         * \param _feature_correspondences returned map of correspondences: _feature_correspondences[feature_out_ptr] = feature_in_ptr
-         *
-         * \return the number of features tracked
-         */
-        unsigned int trackFeatures(const FeatureBasePtrList& _features_in,
-                                           const CaptureBasePtr& _capture,
-                                           FeatureBasePtrList& _features_out,
-                                           FeatureMatchMap& _feature_correspondences) override;
-
-        /** \brief Correct the drift in incoming feature by re-comparing against the corresponding feature in origin.
-         * \param _origin_feature input feature in origin capture tracked
-         * \param _incoming_feature input/output feature in incoming capture to be corrected
-         * \return false if the the process discards the correspondence with origin's feature
-         */
-        bool correctFeatureDrift(const FeatureBasePtr _origin_feature, const FeatureBasePtr _last_feature, FeatureBasePtr _incoming_feature) override;
-
-        /** \brief Vote for KeyFrame generation
-         *
-         * If a KeyFrame criterion is validated, this function returns true,
-         * meaning that it wants to create a KeyFrame at the \b last Capture.
-         *
-         * WARNING! This function only votes! It does not create KeyFrames!
-         */
-        bool voteForKeyFrame() const override;
-
-        /** \brief Detect new Features
-         * \param _max_features maximum number of features detected (-1: unlimited. 0: none)
-         * \param _capture The capture in which the new features should be detected.
-         * \param _features_out The list of detected Features in _capture.
-         * \return The number of detected Features.
-         *
-         * This function detects Features that do not correspond to known Features/Landmarks in the system.
-         *
-         * The function is called in ProcessorTrackerFeature::processNew() to set the member new_features_last_,
-         * the list of newly detected features of the capture last_ptr_.
-         */
-        unsigned int detectNewFeatures(const int& _max_new_features,
-                                               const CaptureBasePtr& _capture,
-                                               FeatureBasePtrList& _features_out) override;
-
-        /** \brief Emplaces a new factor
-         * \param _feature_ptr pointer to the parent Feature
-         * \param _feature_other_ptr pointer to the other feature constrained.
-         *
-         * This function emplaces a factor of the appropriate type for the derived processor.
-         */
-        FactorBasePtr emplaceFactor(FeatureBasePtr _feature_ptr, FeatureBasePtr _feature_other_ptr) override;
-
-        //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-
-        /** \brief advance pointers
-         *
-         */
-        void advanceDerived() override;
-
-        /** \brief reset pointers and match lists at KF creation
-         *
-         */
-        void resetDerived() override;
-
-        /** \brief Pre-process: check if all captures (prev-origin, origin, last) are initialized to allow factors creation
-         *
-         */
-        void preProcess() override;
-
-        /** \brief Post-process
-         *
-         */
-        void postProcess() override;
-
-        /** \brief Establish factors between features in Captures \b last and \b origin
-         */
-        void establishFactors() override;
-
-        CaptureBasePtr getPrevOrigin();
-
-        bool isInlier(const cv::KeyPoint& _kp_incoming, const cv::KeyPoint& _kp_last);
-
-        void setParams(const ParamsProcessorTrackerFeatureTrifocalPtr _params);
-
-    public:
-
-        /// @brief Factory method
-        static ProcessorBasePtr create(const std::string& _unique_name,
-                                       const ParamsProcessorBasePtr _params);
-    private:
-
-        cv::Mat image_debug_;
-
-    public:
-
-        /**
-         * \brief Return Image for debug purposes
-         */
-        cv::Mat getImageDebug();
-
-        /// \brief Get pixel covariance
-        const Matrix2d& pixelCov() const;
-};
-
-inline CaptureBasePtr ProcessorTrackerFeatureTrifocal::getPrevOrigin()
-{
-    return prev_origin_ptr_;
-}
-
-inline cv::Mat ProcessorTrackerFeatureTrifocal::getImageDebug()
-{
-    return image_debug_;
-}
-
-inline const Eigen::Matrix2d& ProcessorTrackerFeatureTrifocal::pixelCov() const
-{
-    return pixel_cov_;
-}
-
-} // namespace wolf
-
-#endif /* _PROCESSOR_TRACKER_FEATURE_TRIFOCAL_H_ */
diff --git a/include/vision/processor/processor_tracker_landmark_image.h b/include/vision/processor/processor_tracker_landmark_image.h
deleted file mode 100644
index 00aa9f799c3152ca3cceb48054b267c724026d06..0000000000000000000000000000000000000000
--- a/include/vision/processor/processor_tracker_landmark_image.h
+++ /dev/null
@@ -1,225 +0,0 @@
-//--------LICENSE_START--------
-//
-// Copyright (C) 2020,2021,2022 Institut de Robòtica i Informàtica Industrial, CSIC-UPC.
-// Authors: Joan Solà Ortega (jsola@iri.upc.edu)
-// All rights reserved.
-//
-// This file is part of WOLF
-// WOLF is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with this program.  If not, see <http://www.gnu.org/licenses/>.
-//
-//--------LICENSE_END--------
-#ifndef PROCESSOR_TRACKER_LANDMARK_IMAGE_H
-#define PROCESSOR_TRACKER_LANDMARK_IMAGE_H
-
-// Wolf includes
-
-#include "vision/landmark/landmark_ahp.h"
-#include "core/landmark/landmark_match.h"
-#include "vision/processor/processor_params_image.h"
-#include "core/processor/processor_tracker_landmark.h"
-#include "core/common/wolf.h"
-
-#include <vision_utils/algorithms/activesearch/alg_activesearch.h>
-#include <vision_utils/descriptors/descriptor_base.h>
-#include <vision_utils/detectors/detector_base.h>
-#include <vision_utils/matchers/matcher_base.h>
-
-#include <opencv2/core/mat.hpp>
-#include <opencv2/core/mat.inl.hpp>
-#include <opencv2/core/types.hpp>
-
-#include <list>
-#include <memory>
-#include <string>
-#include <vector>
-
-namespace wolf {
-
-WOLF_PTR_TYPEDEFS(ProcessorTrackerLandmarkImage);
-
-//Class
-class ProcessorTrackerLandmarkImage : public ProcessorTrackerLandmark
-{
-    protected:
-
-        vision_utils::DetectorBasePtr det_ptr_;
-        vision_utils::DescriptorBasePtr des_ptr_;
-        vision_utils::MatcherBasePtr mat_ptr_;
-        vision_utils::AlgorithmACTIVESEARCHPtr active_search_ptr_;  // Active Search
-
-        int cell_width_; ///< Active search cell width
-        int cell_height_; ///< Active search cell height
-        vision_utils::AlgorithmParamsACTIVESEARCHPtr params_tracker_landmark_image_activesearch_ptr_; ///< Active search parameters
-
-    protected:
-        ParamsProcessorTrackerLandmarkImagePtr params_tracker_landmark_image_;           // Struct with parameters of the processors
-
-        cv::Mat image_last_, image_incoming_;   // Images of the "last" and "incoming" Captures
-
-        struct
-        {
-                unsigned int width_; ///< width of the image
-                unsigned int height_; ///< height of the image
-        }image_;
-
-        unsigned int landmarks_tracked_ = 0;
-
-        /* pinhole params */
-        Eigen::Vector2d distortion_;
-        Eigen::Vector2d correction_;
-
-        /* transformations */
-        Eigen::Vector3d world2cam_translation_;
-        Eigen::Vector4d world2cam_orientation_;
-
-        Eigen::Vector3d cam2world_translation_;
-        Eigen::Vector4d cam2world_orientation_;
-
-        unsigned int n_feature_;
-        unsigned int landmark_idx_non_visible_;
-
-        std::list<float> list_response_;
-
-    public:
-
-        // Lists to store values to debug
-        std::list<cv::Rect> tracker_roi_;
-        std::list<cv::Rect> detector_roi_;
-        std::list<cv::Point> tracker_target_;
-        FeatureBasePtrList feat_lmk_found_;
-
-        ProcessorTrackerLandmarkImage(ParamsProcessorTrackerLandmarkImagePtr _params_tracker_landmark_image);
-        ~ProcessorTrackerLandmarkImage() override;
-
-        void configure(SensorBasePtr _sensor) override;
-
-    protected:
-
-        /**
-         * \brief Does cast of the images and renews the active grid.
-         */
-        void preProcess() override;
-
-        void advanceDerived() override
-        {
-            ProcessorTrackerLandmark::advanceDerived();
-            image_last_ = image_incoming_;
-        }
-
-        void resetDerived() override
-        {
-            ProcessorTrackerLandmark::resetDerived();
-            image_last_ = image_incoming_;
-        }
-
-        /**
-         * \brief Does the drawing of the features.
-         *
-         * Used for debugging
-         */
-        void postProcess() override;
-
-        /** \brief Find provided landmarks as features in the provided capture
-         * \param _landmarks_in input list of landmarks to be found
-         * \param _capture the capture in which the _landmarks_in should be searched
-         * \param _features_out returned list of features  found in \b _capture corresponding to a landmark of _landmarks_in
-         * \param _feature_landmark_correspondences returned map of landmark correspondences: _feature_landmark_correspondences[_feature_out_ptr] = landmark_in_ptr
-         *
-         * \return the number of landmarks found
-         */
-        unsigned int findLandmarks(const LandmarkBasePtrList& _landmarks_in,
-                                           const CaptureBasePtr& _capture,
-                                           FeatureBasePtrList& _features_out,
-                                           LandmarkMatchMap& _feature_landmark_correspondences) override;
-
-        /** \brief Vote for KeyFrame generation
-         *
-         * If a KeyFrame criterion is validated, this function returns true,
-         * meaning that it wants to create a KeyFrame at the \b last Capture.
-         *
-         * WARNING! This function only votes! It does not create KeyFrames!
-         */
-        bool voteForKeyFrame() const override;
-
-        /** \brief Detect new Features
-         * \param _max_features maximum number of features detected (-1: unlimited. 0: none)
-         * \param _capture The capture in which the new features should be detected.
-         * \param _features_out The list of detected Features in _capture.
-         * \return The number of detected Features.
-         *
-         * This function detects Features that do not correspond to known Features/Landmarks in the system.
-         *
-         * IMPORTANT: The features in _features_out should be emplaced. Don't use `make_shared`, use `FeatureBase::emplace` instead.
-         * Then, they will be already linked to the _capture.
-         * If you detect all the features at once in `preprocess()`, you should either emplace them (`FeatureBase::emplace()`) and remove the not returned features in _features_out (`FeatureBase::remove()`),
-         * or create them (`make_shared()`) and link all the returned features in _features_out (`FeatureBase::link(_capture)`).
-         *
-         * The function is called in ProcessorTrackerLandmark::processNew() to set the member new_features_last_,
-         * the list of newly detected features of the capture last_ptr_.
-         */
-        unsigned int detectNewFeatures(const int& _max_new_features,
-                                               const CaptureBasePtr& _capture,
-                                               FeatureBasePtrList& _features_out) override;
-
-        /** \brief Emplaces one landmark
-         */
-        LandmarkBasePtr emplaceLandmark(FeatureBasePtr _feature_ptr) override;
-
-    public:
-        static ProcessorBasePtr create(const std::string& _unique_name, const ParamsProcessorBasePtr _params);
-
-        /** \brief Emplaces a new factor
-         * \param _feature_ptr pointer to the Feature to constrain
-         * \param _landmark_ptr LandmarkBase pointer to the Landmark constrained.
-         */
-        FactorBasePtr emplaceFactor(FeatureBasePtr _feature_ptr, LandmarkBasePtr _landmark_ptr) override;
-
-        //Other functions
-    private:
-
-        /**
-         * \brief Detects keypoints its descriptors in a specific roi of the image
-         * \param _image input image in which the algorithm will search
-         * \param _roi input roi used to define the area of search within the image
-         * \param _new_keypoints output keypoints obtained in the function
-         * \param new_descriptors output descriptors obtained in the function
-         * \return the number of detected features
-         */
-        unsigned int detect(const cv::Mat _image, cv::Rect& _roi, std::vector<cv::KeyPoint>& _new_keypoints,cv::Mat& new_descriptors);
-
-        /**
-         * \brief Does the match between a target descriptor and (potentially) multiple candidate descriptors of a Feature.
-         * \param _target_descriptor descriptor of the target
-         * \param _candidate_descriptors descriptors of the candidates
-         * \param _cv_matches output variable in which the best result will be stored (in the position [0])
-         * \return normalized score of similarity (1 - exact match; 0 - complete mismatch)
-         */
-        double match(const cv::Mat _target_descriptor, const cv::Mat _candidate_descriptors, std::vector<cv::DMatch>& _cv_matches);
-
-        void landmarkInCurrentCamera(const Eigen::VectorXd& _frame_state,
-                                     const LandmarkAhpPtr   _landmark,
-                                     Eigen::Vector4d&       _point3d_hmg);
-
-        // These only to debug, will disappear one day soon
-    public:
-        void drawLandmarks(cv::Mat _image);
-        void drawFeaturesFromLandmarks(cv::Mat _image);
-        void drawRoi(cv::Mat _image, std::list<cv::Rect> _roi_list, cv::Scalar _color);
-        void drawTrackerRoi(cv::Mat _image, cv::Scalar _color);
-
-};
-
-} // namespace wolf
-
-#endif // PROCESSOR_TRACKER_LANDMARK_IMAGE_H
diff --git a/include/vision/processor/processor_visual_odometry.h b/include/vision/processor/processor_visual_odometry.h
new file mode 100644
index 0000000000000000000000000000000000000000..9cf5b0160eebe0a48759b64b31b4a79b87e54636
--- /dev/null
+++ b/include/vision/processor/processor_visual_odometry.h
@@ -0,0 +1,300 @@
+//--------LICENSE_START--------
+//
+// Copyright (C) 2020,2021,2022 Institut de Robòtica i Informàtica Industrial, CSIC-UPC.
+// Authors: Joan Solà Ortega (jsola@iri.upc.edu)
+// All rights reserved.
+//
+// This file is part of WOLF
+// WOLF is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with this program.  If not, see <http://www.gnu.org/licenses/>.
+//
+//--------LICENSE_END--------
+
+#ifndef INCLUDE_BASE_PROCESSOR_PROCESSOR_VISUAL_ODOMETRY_H_
+#define INCLUDE_BASE_PROCESSOR_PROCESSOR_VISUAL_ODOMETRY_H_
+
+
+// wolf plugin includes
+#include "vision/math/pinhole_tools.h"
+#include "vision/sensor/sensor_camera.h"
+#include "vision/capture/capture_image.h"
+#include "vision/feature/feature_point_image.h"
+#include "vision/landmark/landmark_hp.h"
+#include "vision/factor/factor_pixel_hp.h"
+#include "vision/processor/active_search.h"
+
+// wolf includes
+#include <core/math/rotations.h>
+#include <core/processor/processor_tracker.h>
+#include <core/processor/track_matrix.h>
+
+// Opencv includes
+#include <opencv2/core.hpp>
+#include <opencv2/imgcodecs.hpp>
+#include <opencv2/highgui.hpp>
+#include <opencv2/features2d.hpp>
+#include <opencv2/calib3d/calib3d.hpp>
+#include <opencv2/video/tracking.hpp>
+
+
+
+namespace wolf{
+
+WOLF_STRUCT_PTR_TYPEDEFS(ParamsProcessorVisualOdometry);
+
+struct ParamsProcessorVisualOdometry : public ParamsProcessorTracker
+{
+    struct RansacParams
+    {
+        double prob;
+        double thresh;
+    };
+
+    struct KltParams
+    {
+        int patch_width;
+        int patch_height;
+        double max_err;
+        int nlevels_pyramids;
+        cv::TermCriteria criteria;
+    };
+
+    struct FastParams
+    {
+        int threshold;
+        bool non_max_suppresion;
+    };
+
+    struct GridParams
+    {
+        unsigned int nbr_cells_h;
+        unsigned int nbr_cells_v;
+        unsigned int margin;
+        unsigned int separation;
+    };
+
+    struct EqualizationParams
+    {
+            unsigned int method; // 0: none; 1: average; 2: histogram; 3: CLAHE
+            // note: cv::histogramEqualization() has no tuning params
+            struct AverageParams
+            {
+                    int median;
+            } average;
+            struct ClaheParams
+            {
+                    double clip_limit;
+                    cv::Size2i tile_grid_size;
+            } clahe;
+    };
+
+    RansacParams ransac;
+    KltParams klt;
+    FastParams fast;
+    GridParams grid;
+    EqualizationParams equalization;
+    double std_pix;
+    unsigned int min_track_length_for_landmark;
+
+    ParamsProcessorVisualOdometry() = default;
+    ParamsProcessorVisualOdometry(std::string _unique_name, const ParamsServer& _server):
+        ParamsProcessorTracker(_unique_name, _server)
+    {
+        std_pix = _server.getParam<double>(prefix + _unique_name + "/std_pix");
+
+        equalization.method = _server.getParam<unsigned int>(prefix + _unique_name + "/equalization/method");
+        switch (equalization.method)
+        {
+            case 0: break;
+            case 1:
+                equalization.average.median = _server.getParam<unsigned int>(prefix + _unique_name + "/equalization/average/median");
+                break;
+            case 2:
+                // note: cv::histogramEqualization() has no tuning params
+                break;
+            case 3:
+                equalization.clahe.clip_limit = _server.getParam<double>(prefix + _unique_name + "/equalization/clahe/clip_limit");
+                vector<int> grid_size = _server.getParam<vector<int>>(prefix + _unique_name + "/equalization/clahe/tile_grid_size");
+                equalization.clahe.tile_grid_size.width  = grid_size[0];
+                equalization.clahe.tile_grid_size.height = grid_size[1];
+                break;
+        }
+
+        ransac.prob   = _server.getParam<double>(prefix + _unique_name + "/ransac/prob");
+        ransac.thresh = _server.getParam<double>(prefix + _unique_name + "/ransac/thresh");
+
+        klt.patch_width        = _server.getParam<int>    (prefix + _unique_name + "/klt/patch_width");
+        klt.patch_height       = _server.getParam<int>    (prefix + _unique_name + "/klt/patch_height");
+        klt.max_err            = _server.getParam<double> (prefix + _unique_name + "/klt/max_err");
+        klt.nlevels_pyramids   = _server.getParam<int>    (prefix + _unique_name + "/klt/nlevels_pyramids");
+        klt.criteria           = cv::TermCriteria(cv::TermCriteria::COUNT+cv::TermCriteria::EPS, 30, 0.01);  // everybody uses this defaults...
+
+        fast.threshold         = _server.getParam<int>    ( prefix + _unique_name + "/fast/threshold");
+        fast.non_max_suppresion = _server.getParam<bool>  (prefix + _unique_name + "/fast/non_max_suppresion");
+
+        grid.nbr_cells_h   = _server.getParam<unsigned int>(prefix + _unique_name + "/grid/nbr_cells_h");
+        grid.nbr_cells_v   = _server.getParam<unsigned int>(prefix + _unique_name + "/grid/nbr_cells_v");
+        grid.margin        = _server.getParam<unsigned int>(prefix + _unique_name + "/grid/margin");
+        grid.separation    = _server.getParam<unsigned int>(prefix + _unique_name + "/grid/separation");
+
+        min_track_length_for_landmark = _server.getParam<unsigned int>(prefix + _unique_name + "/min_track_length_for_landmark");
+
+    }
+    std::string print() const override
+    {
+        return ParamsProcessorTracker::print()                                                      + "\n"
+            + "equalization.method:       " + std::to_string(equalization.method)                   + "\n"
+            + "ransac.prob:               " + std::to_string(ransac.prob)                           + "\n"
+            + "ransac.thresh:             " + std::to_string(ransac.thresh)                         + "\n"
+            + "klt.patch_width:           " + std::to_string(klt.patch_width)                       + "\n"
+            + "klt.patch_height:          " + std::to_string(klt.patch_height)                      + "\n"
+            + "klt.max_err:               " + std::to_string(klt.max_err)                           + "\n"
+            + "klt.nlevels_pyramids:      " + std::to_string(klt.nlevels_pyramids)                  + "\n"
+            + "fast.threshold:            " + std::to_string(fast.threshold)                        + "\n"
+            + "fast.non_max_suppresion:   " + std::to_string(fast.non_max_suppresion)               + "\n"
+            + "grid.nbr_cells_h:          " + std::to_string(grid.nbr_cells_h)                      + "\n"
+            + "grid.nbr_cells_v:          " + std::to_string(grid.nbr_cells_v)                      + "\n"
+            + "grid.margin:               " + std::to_string(grid.margin)                           + "\n"
+            + "grid.separation:           " + std::to_string(grid.separation)                       + "\n"
+            + "min_track_length_for_landmark:   " + std::to_string(min_track_length_for_landmark)   + "\n";
+    }
+};
+
+WOLF_PTR_TYPEDEFS(ProcessorVisualOdometry);
+
+class ProcessorVisualOdometry : public ProcessorTracker
+{
+    public:
+        ProcessorVisualOdometry(ParamsProcessorVisualOdometryPtr _params_visual_odometry);
+        virtual ~ProcessorVisualOdometry() override {};
+
+        WOLF_PROCESSOR_CREATE(ProcessorVisualOdometry, ParamsProcessorVisualOdometry);
+
+    protected:
+        ParamsProcessorVisualOdometryPtr params_visual_odometry_;
+
+        TrackMatrix track_matrix_;
+
+        Matrix2d pixel_cov_;
+
+        // detector
+        cv::Ptr<cv::FeatureDetector> detector_;
+
+        // A few casted smart pointers
+        CaptureImagePtr capture_image_last_;
+        CaptureImagePtr capture_image_incoming_;
+        CaptureImagePtr capture_image_origin_;
+        SensorCameraPtr sen_cam_;
+
+        ActiveSearchGrid cell_grid_;
+
+    private:
+        int frame_count_;
+
+        // camera
+        cv::Mat Kcv_;
+
+        // bookkeeping
+        TracksMap tracks_map_li_matched_;
+
+
+    public:
+
+        /**
+         * \brief Get params from sensor to finish processor setup
+         */
+        void configure(SensorBasePtr _sensor) override;
+
+        /** \brief Pre-process incoming Capture, see ProcessorTracker
+         */
+        void preProcess() override;
+
+        /** \brief Post-process, see ProcessorTracker
+         */
+        virtual void postProcess() override;
+
+        /** \brief Vote for KeyFrame generation, see ProcessorTracker
+         */
+        bool voteForKeyFrame() const override;
+
+        /**
+         * \brief Tracker function
+         * \return The number of successful tracks.
+         *
+         * see ProcessorTracker
+         */
+        unsigned int processKnown() override;
+
+
+        /**
+         * \brief Process new Features
+         * \param _max_features the maximum number of new feature tracks
+         * \return the number of new tracks
+         */
+        unsigned int processNew(const int& _max_features) override;
+
+
+        /**
+         * \brief Creates and adds factors from last_ to origin_
+         */
+        void establishFactors() override;
+
+        /**
+         * \brief Emplace a landmark corresponding to a track and initialize it with triangulation.
+         * \param _feature_ptr a pointer to the feature used to create the new landmark
+         * \return a pointer to the created landmark
+         */
+        LandmarkBasePtr emplaceLandmark(FeatureBasePtr _feature_ptr);
+
+
+        /** \brief Advance the incoming Capture to become the last.
+         *
+         * see ProcessorTracker
+         */
+        void advanceDerived() override;
+
+
+        /** \brief Reset the tracker using the \b last Capture as the new \b origin.
+         */
+        void resetDerived() override;
+
+        /**
+         * \brief Implementation of pyramidal KLT with openCV
+         * \param img_prev previous image
+         * \param img_curr current image
+         * \param mwkps_prev keypoints in previous image
+         * \param mwkps_curr keypoints in current image, those tracked from the previous image
+         * \return a map with track associations
+         */
+        TracksMap kltTrack(const cv::Mat img_prev, const cv::Mat img_curr, const KeyPointsMap &mwkps_prev, KeyPointsMap &mwkps_curr);
+
+        /** \brief Remove outliers from the tracks map with a RANSAC 5-points algorithm implemented on openCV
+         */
+        bool filterWithEssential(const KeyPointsMap mwkps_prev, const KeyPointsMap mwkps_curr, TracksMap &tracks_prev_curr, cv::Mat &E);
+
+        /** \brief Tool to merge tracks 
+         */
+        static TracksMap mergeTracks(const TracksMap& tracks_prev_curr, const TracksMap& tracks_curr_next);
+
+        void setParams(const ParamsProcessorVisualOdometryPtr _params);
+
+        const TrackMatrix& getTrackMatrix() const {return track_matrix_;}
+
+    private:
+        void retainBest(std::vector<cv::KeyPoint> &_keypoints, int n);
+
+};
+
+} //namespace wolf
+
+#endif /* INCLUDE_BASE_PROCESSOR_PROCESSOR_VISUAL_ODOMETRY_H_ */
diff --git a/include/vision/sensor/sensor_camera.h b/include/vision/sensor/sensor_camera.h
index 91b23e254d9f86d7f4c621a5a4a49c26d1cf287b..d4b6c34af7cc35eaa7da9c8dedb8754d5bdf1754 100644
--- a/include/vision/sensor/sensor_camera.h
+++ b/include/vision/sensor/sensor_camera.h
@@ -34,64 +34,89 @@ WOLF_STRUCT_PTR_TYPEDEFS(ParamsSensorCamera);
  */
 struct ParamsSensorCamera : public ParamsSensorBase
 {
-        unsigned int width;                     ///< Image width in pixels
-        unsigned int height;                    ///< Image height in pixels
+        unsigned int    width;                  ///< Image width in pixels
+        unsigned int    height;                 ///< Image height in pixels
+        bool            using_raw;              ///< Use raw images?
         Eigen::Vector4d pinhole_model_raw;      ///< k = [u_0, v_0, alpha_u, alpha_v]  vector of pinhole intrinsic parameters
         Eigen::Vector4d pinhole_model_rectified;///< k = [u_0, v_0, alpha_u, alpha_v]  vector of pinhole intrinsic parameters
         Eigen::VectorXd distortion;             ///< d = [d_1, d_2, d_3, ...] radial distortion coefficients
-    ParamsSensorCamera()
-    {
-        //DEFINED FOR COMPATIBILITY PURPOSES. TO BE REMOVED IN THE FUTURE.
-    }
+    ParamsSensorCamera() :
+        width(0),
+        height(0),
+        using_raw(true)
+        {
+            //
+        };
+
     ParamsSensorCamera(std::string _unique_name, const ParamsServer& _server):
         ParamsSensorBase(_unique_name,  _server)
     {
-        width                   = _server.getParam<unsigned int>(prefix + _unique_name       + "/width");
-        height                  = _server.getParam<unsigned int>(prefix + _unique_name       + "/height");
-        VectorXd distortion     = _server.getParam<Eigen::VectorXd>(prefix + _unique_name + "/distortion_coefficients/data");
-        VectorXd intrinsic      = _server.getParam<Eigen::VectorXd>(prefix + _unique_name + "/camera_matrix/data");
-        VectorXd projection     = _server.getParam<Eigen::VectorXd>(prefix + _unique_name + "/projection_matrix/data");
-
-        pinhole_model_raw[0] = intrinsic[2];
-        pinhole_model_raw[1] = intrinsic[5];
-        pinhole_model_raw[2] = intrinsic[0];
-        pinhole_model_raw[3] = intrinsic[4];
-
-        pinhole_model_rectified[0] = projection[2];
-        pinhole_model_rectified[1] = projection[6];
-        pinhole_model_rectified[2] = projection[0];
-        pinhole_model_rectified[3] = projection[5];
-
-        assert (distortion.size() == 5 && "Distortion size must be size 5!");
-
-        WOLF_WARN_COND( distortion(2) != 0 || distortion(3) != 0 , "Wolf does not handle tangential distortion. Please consider re-calibrating without tangential distortion!");
-
-        if (distortion(4) == 0)
-            if (distortion(1) == 0)
-                if (distortion(0) == 0)
+        width               = _server.getParam<unsigned int>(prefix + _unique_name       + "/width");
+        height              = _server.getParam<unsigned int>(prefix + _unique_name       + "/height");
+        using_raw           = _server.getParam<bool>        (prefix + _unique_name       + "/using_raw");
+        VectorXd distort    = _server.getParam<Eigen::VectorXd>(prefix + _unique_name + "/distortion_coefficients/data");
+        VectorXd intrinsic  = _server.getParam<Eigen::VectorXd>(prefix + _unique_name + "/camera_matrix/data");
+        VectorXd projection = _server.getParam<Eigen::VectorXd>(prefix + _unique_name + "/projection_matrix/data");
+
+        /* Get raw params from K matrix in camera_info:
+         *
+         *     | au  0 u0 |
+         * K = |  0 av v0 |
+         *     |  0  0  1 |
+         */
+        pinhole_model_raw[0] = intrinsic[2]; // u0
+        pinhole_model_raw[1] = intrinsic[5]; // v0
+        pinhole_model_raw[2] = intrinsic[0]; // au
+        pinhole_model_raw[3] = intrinsic[4]; // av
+
+        /* Get rectified params from P matrix in camera_info:
+         *
+         *     | au  0 u0 tx |
+         * P = |  0 av v0 ty |
+         *     |  0  0  1  0 |
+         */
+        pinhole_model_rectified[0] = projection[2]; // u0
+        pinhole_model_rectified[1] = projection[6]; // v0
+        pinhole_model_rectified[2] = projection[0]; // au
+        pinhole_model_rectified[3] = projection[5]; // av
+
+        /* Get distortion params from vector D in camera_info:
+         *
+         * D = [ r1 r2 t1 t2 r3 ], with ri: radial params; ti: tangential params
+         *
+         * NOTE: Wolf ignores tangential params!!!
+         */
+        assert (distort.size() == 5 && "Distortion size must be size 5!");
+
+        WOLF_WARN_COND( distort(2) != 0 || distort(3) != 0 , "Wolf does not handle tangential distortion. Please consider re-calibrating without tangential distortion!");
+
+        if (distort(4) == 0)
+            if (distort(1) == 0)
+                if (distort(0) == 0)
                     distortion.resize(0);
                 else
                 {
                     distortion.resize(1);
-                    distortion = distortion.head<1>();
+                    distortion = distort.head<1>();
                 }
             else
             {
                 distortion.resize(2);
-                distortion = distortion.head<2>();
+                distortion = distort.head<2>();
             }
         else
         {
             distortion.resize(3);
-            distortion.head<2>() = distortion.head<2>();
-            distortion.tail<1>() = distortion.tail<1>();
+            distortion.head<2>() = distort.head<2>();
+            distortion.tail<1>() = distort.tail<1>();
         }
     }
     std::string print() const override
     {
-        return ParamsSensorBase::print()                                             + "\n"
+        return ParamsSensorBase::print()                                                    + "\n"
             + "width: "         + std::to_string(width)                                     + "\n"
             + "height: "        + std::to_string(height)                                    + "\n"
+            + "using_raw: "     + std::to_string(using_raw)                                 + "\n"
             + "pinhole: "       + converter<std::string>::convert(pinhole_model_raw)        + "\n"
             + "pinhole rect.: " + converter<std::string>::convert(pinhole_model_rectified)  + "\n"
             + "distortion: "    + converter<std::string>::convert(distortion)               + "\n";
@@ -112,19 +137,20 @@ class SensorCamera : public SensorBase
 
         ~SensorCamera() override;
 
-        Eigen::VectorXd getDistortionVector()   { return distortion_; }
-        Eigen::VectorXd getCorrectionVector()   { return correction_; }
-        Eigen::Matrix3d getIntrinsicMatrix()    { return K_; }
+        Eigen::VectorXd getDistortionVector()const   { return distortion_; }
+        Eigen::VectorXd getCorrectionVector()const   { return correction_; }
+        Eigen::Matrix3d getIntrinsicMatrix() const   { return K_; }
+        Eigen::Vector4d getPinholeModel()    const   { if (using_raw_) return pinhole_model_raw_; else return pinhole_model_rectified_;}
+        Eigen::Matrix<double, 3, 4>getProjectionMatrix() const;
 
-        bool isUsingRawImages() { return using_raw_; }
+        bool isUsingRawImages() const { return using_raw_; }
         bool useRawImages();
         bool useRectifiedImages();
 
-
-        int getImgWidth(){return img_width_;}
-        int getImgHeight(){return img_height_;}
-        void setImgWidth(int _w){img_width_ = _w;}
-        void setImgHeight(int _h){img_height_ = _h;}
+        int getImgWidth()  const    {return img_width_;}
+        int getImgHeight() const    {return img_height_;}
+        void setImgWidth(int _w)    {img_width_ = _w;}
+        void setImgHeight(int _h)   {img_height_ = _h;}
 
     private:
         int img_width_;
@@ -135,7 +161,7 @@ class SensorCamera : public SensorBase
         Eigen::Matrix3d K_;
         bool using_raw_;
 
-        virtual Eigen::Matrix3d setIntrinsicMatrix(Eigen::Vector4d _pinhole_model);
+        Eigen::Matrix3d computeIntrinsicMatrix(const Eigen::Vector4d& _pinhole_model) const;
 
     public:
         EIGEN_MAKE_ALIGNED_OPERATOR_NEW; // to guarantee alignment (see http://eigen.tuxfamily.org/dox-devel/group__TopicStructHavingEigenMembers.html)
@@ -144,7 +170,7 @@ class SensorCamera : public SensorBase
 inline bool SensorCamera::useRawImages()
 {
     getIntrinsic()->setState(pinhole_model_raw_);
-    K_ = setIntrinsicMatrix(pinhole_model_raw_);
+    K_ = computeIntrinsicMatrix(pinhole_model_raw_);
     using_raw_ = true;
 
     return true;
@@ -153,7 +179,9 @@ inline bool SensorCamera::useRawImages()
 inline bool SensorCamera::useRectifiedImages()
 {
     getIntrinsic()->setState(pinhole_model_rectified_);
-    K_ = setIntrinsicMatrix(pinhole_model_rectified_);
+    K_ = computeIntrinsicMatrix(pinhole_model_rectified_);
+    distortion_.resize(0); // remove distortion from model
+    correction_.resize(0);
     using_raw_ = false;
 
     return true;
diff --git a/scilab/corner_detector.sce b/scilab/corner_detector.sce
deleted file mode 100644
index 18035de71787e22dd3d53adc8002cbbac8f873f6..0000000000000000000000000000000000000000
--- a/scilab/corner_detector.sce
+++ /dev/null
@@ -1,237 +0,0 @@
-//info about 2D homogeneous lines and points: http://homepages.inf.ed.ac.uk/rbf/CVonline/LOCAL_COPIES/BEARDSLEY/node2.html
-
-// clear all 
-xdel(winsid());
-clear;
-
-//scan params:
-Ns = 720; //scan rays
-aperture = %pi; //scan aperture [rad]
-azimuth_step = aperture/Ns;
-
-//User Tunning params
-Nw = 8; //window size
-theta_th = %pi/8;
-theta_max = 0.3;
-K = 3; //How many std_dev are tolerated to count that a point is supporting a line
-r_stdev = 0.01; //ranging std dev
-max_beam_dist = 5; //max distance in amount of beams to consider concatenation of two lines
-max_dist = 0.2; //max distance to a corner from the ends of both lines to take it
-range_jump_th = 0.5; //threshold of distance to detect jumps in ranges
-max_points_line = 1000; //max amount of beams of a line
-
-//init
-points = [];
-result_lines = [];
-line_indexes = [];
-corners = [];
-corners_jumps = [];
-jumps = [];
-
-//scan ranges
-ranges = read(fullpath('scan.txt'),-1,Ns);
-//ranges = [];
-
-//invent a set of points + noise
-//points = [1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24  25 26  27 28  29 30  31 32  33  34  35  36  37 38  39  40  41  42  43;
-//          7 6 5 4 3 2 1 2 3 4  5  6  7  8  9  10 9  8  7  6  5  4  3  3.5 4  4.5 5  5.5 6  6.5 7  7.5 7.4 7.3 7.2 7.1 7  6.9 6.8 6.7 6.6 6.5 6.4];
-for i=1:Ns
-   points = [points [ranges(i)*cos(aperture/2 - azimuth_step*i); ranges(i)*sin(aperture/2 - azimuth_step*i)]];
-   // Store range jumps
-   if i>1 & abs(ranges(i)-ranges(i-1)) > range_jump_th then
-       jumps = [jumps i];
-   end
-end
-
-points = points + rand(points,"normal")*r_stdev;
-
-//Main loop. Runs over a sliding window of Nw points
-i_jump = 1;
-for i = Nw:size(points,2)
-    
-    //set the window indexes
-    i_from = i-Nw+1;
-    i_to = i;
-    points_w = points(:,i_from:i_to);
-    
-    //update the jump to be checked
-    while i_jump < size(jumps,2) & i_from > jumps(i_jump)
-        i_jump = i_jump+1;
-    end
-    
-    //check if there is a jump inside the window (if it is the case, do not fit a line)
-    if jumps(i_jump) > i_from & jumps(i_jump) <= i_to then
-        continue;
-    end
-
-    //Found the best fitting line over the window. Build the system: Ax=0. Matrix A = a_ij
-    a_00 = sum( points_w(1,:).^2 );
-    a_01 = sum( points_w(1,:).*points_w(2,:) );
-    a_02 = sum( points_w(1,:) );
-    a_10 = a_01;
-    a_11 = sum( points_w(2,:).^2 );
-    a_12 = sum( points_w(2,:) );
-    a_20 = a_02;
-    a_21 = a_12;
-    a_22 = Nw;
-    A1 = [a_00 a_01 a_02; a_10 a_11 a_12; a_20 a_21 a_22; 0 0 1];
-    
-    a_00 = sum( points_w(1,:).^2 );
-    a_01 = sum( points_w(1,:).*points_w(2,:) );
-    a_02 = sum( points_w(1,:) );
-    a_10 = a_01;
-    a_11 = sum( points_w(2,:).^2 );
-    a_12 = sum( points_w(2,:) );
-    A = [a_00 a_01 a_02; a_10 a_11 a_12; 0 0 1];
-    
-    //solve
-    line1 = pinv(A1)*[zeros(3,1);1];
-    line = inv(A)*[0; 0; 1];
-    //disp(line1-line);
-
-    //compute error
-    err = 0;
-    for j=1:Nw
-        err = err + abs(line'*[points_w(:,j);1])/sqrt(line(1)^2+line(2)^2);
-    end
-    err = err/Nw;
-    //disp("error: "); disp(err);
-    
-    //if error below stdev, add line to result set
-    if err < K*r_stdev then
-        result_lines = [result_lines [line;points_w(:,1);points_w(:,$)]];
-        line_indexes = [line_indexes [i_from; i_to]]; //ray index where the segment ends
-    end    
-end
-
-//line concatenation
-j = 1;
-while (j < size(result_lines,2))
-    // beams between last of current line and first of next line
-    if (line_indexes(1,j+1)-line_indexes(2,j)) > max_beam_dist then
-        j=j+1;
-    else    
-        //compute angle diff between consecutive segments
-        cos_theta = result_lines(1:2,j)'*result_lines(1:2,j+1) / ( norm(result_lines(1:2,j)) * norm(result_lines(1:2,j+1)) );
-        theta = abs(acos(cos_theta));
-        
-        //if angle diff lower than threshold, concatenate
-        if theta < theta_max then
-            
-            //set the new window
-            i_from = line_indexes(1,j);
-            i_to = line_indexes(2,j+1);
-            points_w = points(:,i_from:i_to);
-        
-            //Found the best fitting line over the window. Build the system: Ax=0. Matrix A = a_ij           
-            a_00 = sum( points_w(1,:).^2 );
-            a_01 = sum( points_w(1,:).*points_w(2,:) );
-            a_02 = sum( points_w(1,:) );
-            a_10 = a_01;
-            a_11 = sum( points_w(2,:).^2 );
-            a_12 = sum( points_w(2,:) );
-            A = [a_00 a_01 a_02; a_10 a_11 a_12; 0 0 1];
-            
-            //solve
-            line = inv(A)*[0; 0; 1];
-        
-            //compute error
-            err = 0;
-            for k=1:Nw
-                err = err + abs(line'*[points_w(:,k);1])/sqrt(line(1)^2+line(2)^2);
-            end
-            err = err/Nw;
-            
-            //if error below stdev, change line to concatenation and erase the next line
-            if err < K*r_stdev then
-                result_lines(:,j) = [line;points_w(:,1);points_w(:,$)];
-                line_indexes(:,j) = [i_from; i_to];
-                result_lines = [result_lines(:,1:j) result_lines(:,j+2:$)];
-                line_indexes = [line_indexes(:,1:j) line_indexes(:,j+2:$)];
-                if (i_to-i_from)>max_points_line then
-                    j=j+1;
-                end
-            else
-                j=j+1;
-            end    
-        else
-            j=j+1;
-        end
-    end
-end
-
-//corner detection
-for i = 1:(size(result_lines,2)-1)
-    for j = i+1:size(result_lines,2)
-        // beams between last of current line and first of next line
-        if (line_indexes(1,j)-line_indexes(2,i)) > max_beam_dist then
-            break;
-        end
-        //compute angle diff between consecutive segments
-        cos_theta = result_lines(1:2,i)'*result_lines(1:2,j) / ( norm(result_lines(1:2,i)) * norm(result_lines(1:2,j)) );
-        theta = abs(acos(cos_theta));
-        
-        //if angle diff greater than threshold && indexes are less than Nw, we decide corner
-        if theta > theta_th then
-            //Corner found! Compute "sub-pixel" corner location as the intersection of two lines
-            corner = cross(result_lines(1:3,i),result_lines(1:3,j));
-            corner = corner./corner(3);//norlamlize homogeneous point
-            // Check if the corner is close to both lines ends
-            if (norm(corner(1:2)-points(:,line_indexes(2,i))) < max_dist & norm(corner(1:2)-points(:,line_indexes(1,j))) < max_dist) then
-                corners = [corners corner];
-            end
-            //display
-            //disp("theta: "); disp(theta);
-            //disp("index:" ); disp(line_indexes(i)-Nw+1);//line_indexes(i) indicates the end point of the segment
-        end
-    end
-end
-
-// corner detection from jumps
-for i=1:size(jumps,2)
-    if ranges(jumps(i)) < ranges(jumps(i)-1) then
-        corners_jumps = [corners_jumps points(:,jumps(i))];
-    else
-        corners_jumps = [corners_jumps points(:,jumps(i)-1)];
-    end
-end
-    
-//Set figure
-fig1 = figure(0);
-fig1.background = 8;
-
-//plot points
-plot(points(1,:),points(2,:),"g.");
-
-//axis settings
-ah = gca();
-ah.isoview = "on";
-ah.x_label.text = "$x [m]$";
-ah.x_label.font_size = 4;
-ah.y_label.text = "$y [m]$";
-ah.grid = [0.1,0.1,0.1];
-ah.grid_position = "background";
-
-
-//plot lines
-for i=1:size(result_lines,2)
-    m = -result_lines(1,i)/result_lines(2,i);
-    xc = -result_lines(3,i)/result_lines(2,i);
-    point1 = [result_lines(4,i) m*result_lines(4,i)+xc];
-    point2 = [result_lines(6,i) m*result_lines(6,i)+xc];
-    xpoly([point1(1) point2(1)],[point1(2) point2(2)]);
-end
-
-//plot corners
-plot(corners(1,:),corners(2,:),"ro");
-plot(corners_jumps(1,:),corners_jumps(2,:),"bo");
-
-disp(corners_jumps');
-disp(corners');
-
-//plot jumps
-//for i=1:size(jumps,2)
-//    plot(ranges(jumps(i))*cos(aperture/2 - azimuth_step*jumps(i)), ranges(jumps(i))*sin(aperture/2 - azimuth_step*jumps(i)),"bx");
-//end
-
-
diff --git a/scilab/scan.txt b/scilab/scan.txt
deleted file mode 100644
index d19cf457c35f5fa45abe6d1a9c63883d5ffd891d..0000000000000000000000000000000000000000
--- a/scilab/scan.txt
+++ /dev/null
@@ -1 +0,0 @@
-2.78886,2.78289,2.7832,2.78367,2.7843,2.78508,2.78603,2.78713,2.78839,2.78981,2.79139,2.78669,2.78855,2.79057,2.79274,2.79507,2.79098,2.79359,2.79635,2.79927,2.79566,2.79886,2.80221,2.79895,2.80258,2.80638,2.80345,2.80753,2.81176,2.80917,2.81368,2.81132,2.81611,2.81396,2.81903,2.73664,2.72743,2.71782,2.70883,2.69945,2.69067,2.6815,2.67294,2.66398,2.65562,2.64736,2.6387,2.63064,2.62218,2.61431,2.60605,2.59837,2.59078,2.5828,2.57539,2.56759,2.56036,2.55321,2.54568,2.53871,2.53182,2.52455,2.51782,2.51118,2.50415,2.49767,2.49127,2.48495,2.47824,2.47207,2.46597,2.4595,2.45355,2.44768,2.44188,2.4357,2.43005,2.42446,2.41894,2.4135,2.40768,2.40236,2.39712,2.39195,2.3864,2.38135,2.37637,2.37146,2.36661,2.36182,2.35666,2.352,2.3474,2.34286,2.34186,2.36289,2.41051,2.43311,2.45628,2.48003,2.50439,2.52937,2.555,2.61224,2.63993,2.66837,2.69757,2.72758,2.75841,2.79011,2.82269,2.85621,2.8907,2.96659,3.0042,3.04295,3.08289,3.12406,3.16652,3.21033,3.22693,3.23444,3.24207,3.24982,3.2577,3.26571,3.2855,3.29383,3.30229,3.31088,3.3196,3.32846,3.33745,3.34658,3.35584,3.36524,3.37478,3.38445,3.39427,3.40423,3.41434,3.42459,3.43499,3.44553,3.45623,3.46707,3.47807,3.48923,3.50053,3.512,3.52362,3.53541,3.54736,3.55947,3.57175,3.5842,3.59681,3.6096,3.62256,3.63569,3.64901,3.6625,3.67617,3.69003,3.70407,3.7183,3.73272,3.74733,3.76214,3.7931,3.80843,3.82397,3.83972,3.85567,3.87183,3.88822,3.90481,3.92163,3.93867,3.95593,3.97343,3.97347,3.99127,4.00931,4.02758,4.0461,4.06486,4.08387,4.10314,4.12266,4.14244,4.16248,4.20237,4.22313,4.24418,4.26551,4.28712,4.30903,4.33122,4.35372,4.37652,4.39963,4.42304,4.44678,4.47084,4.49523,4.51994,4.545,4.57041,4.59615,4.62225,4.64871,4.67554,4.70274,4.73032,4.75828,4.78663,4.81538,4.84452,4.8741,4.90409,4.9345,4.96534,4.99662,5.02836,5.06053,5.09317,5.12628,5.15987,5.19395,5.22853,5.2636,5.2992,5.33532,5.37197,5.44106,5.47923,5.51796,5.55729,5.59721,5.63773,5.67888,5.72066,5.76307,5.80615,5.8499,5.89432,5.93945,6.02374,6.07085,6.11872,6.16734,6.21676,6.26698,6
.318,6.36987,6.42258,6.47618,6.5758,6.6319,6.68894,6.74694,6.80593,6.86593,6.92698,7.04022,7.10425,7.1694,7.23572,7.30322,7.37192,7.49928,7.57149,7.64505,7.58372,7.51951,7.45681,7.32129,7.32938,7.34276,7.35632,7.36877,7.38272,7.39687,7.41124,7.4258,7.43923,7.4542,7.46937,7.48477,7.49904,7.51485,7.53087,7.54579,7.56225,7.57894,7.59587,7.61164,7.62902,8.37389,8.39194,8.41173,8.43177,8.45059,8.47118,8.49202,8.51164,8.53305,8.55319,8.57516,8.59739,8.61839,8.64122,8.6628,8.6862,8.70837,8.73237,8.7567,8.77979,8.80472,8.82843,8.85402,8.87835,8.9046,8.9296,8.9565,8.98218,9.0082,9.03616,9.06287,9.09152,9.11895,9.14834,9.17652,9.20503,9.23557,9.26487,9.29454,9.32626,9.35674,9.38762,9.42055,9.45226,9.4844,9.51695,9.55162,9.58503,9.61893,9.65324,4.38929,4.38536,4.36058,4.3365,4.3131,4.29036,4.26827,4.24682,4.22598,4.20576,4.18612,4.1944,4.17582,4.15708,4.13859,4.12032,4.10229,4.08449,4.06691,4.04955,4.03241,4.01549,3.99916,3.98265,3.96634,3.95024,3.93434,3.91901,3.90349,3.88817,3.87304,3.85845,3.84368,3.82909,3.81504,3.80081,3.78674,3.7732,3.75948,3.74591,3.73287,3.71963,3.7069,3.69398,3.68156,3.66894,3.65647,3.6445,3.63233,3.62065,3.60876,3.59736,3.58576,3.58265,3.61553,3.62696,3.63867,3.67347,3.68596,3.72229,3.7356,3.77355,3.78772,3.80219,3.84244,3.85785,3.89993,3.9163,3.93303,3.97774,3.99551,4.01367,4.06121,4.0805,4.10019,4.15081,4.17174,4.19309,13.8062,13.7714,13.7384,13.7075,13.8936,13.9735,14.0549,14.1382,14.3407,15.8017,15.9009,16.002,16.1054,16.3519,16.462,16.5744,16.6893,16.9594,17.0819,17.2072,17.3352,17.4661,17.6,8.14878,8.1334,8.11823,8.10324,8.08848,8.07391,8.0588,8.04465,8.03069,8.01693,8.00338,7.99,7.97684,7.96312,7.95032,7.93773,7.92533,7.91309,7.90106,7.88922,7.87755,7.86606,7.85476,7.84289,7.83195,7.82116,7.81058,7.80017,7.78993,7.77984,7.76995,7.76021,7.75066,7.74128,7.73204,7.76034,7.99805,8.11853,8.24311,8.37202,12.3718,12.3587,12.346,12.3336,12.3213,12.3094,12.2976,12.2862,12.275,12.264,12.2534,12.2429,12.2327,12.2228,12.213,12.2036,12.1944,12.1854,12.1766
,12.3577,12.667,16.7608,16.7501,16.7398,16.7297,16.7201,16.7106,16.7015,16.6929,16.6844,16.9488,20.069,20.0619,20.0552,20.0489,20.043,20.0374,20.0323,20.0275,20.0231,20.0191,20.0155,20.0122,20.0094,20.0069,20.0048,20.0031,20.0018,20.0008,20.0002,20.0001,20.0002,20.0008,20.0018,20.0031,20.0048,20.0069,20.0094,20.0122,20.0155,20.0191,20.0231,20.0275,20.0323,20.0374,20.043,20.0489,20.0552,20.0619,20.069,20.0764,20.0843,20.0926,20.1012,20.1102,20.1196,20.1294,20.1397,20.1502,20.1612,20.1726,20.1844,20.1966,20.2092,20.2222,20.2356,20.2494,20.2636,20.2782,20.2932,20.3086,20.3244,20.3407,20.3573,20.3744,20.3919,20.4098,20.4281,20.4469,20.466,20.4856,20.5057,20.5261,20.547,20.5684,20.5901,20.6123,20.635,20.6581,20.6816,20.7056,20.73,20.7549,20.7802,20.806,20.8323,20.859,20.8862,20.9139,20.942,20.9706,20.9997,21.0293,21.0594,21.0899,21.1209,21.1525,21.1845,21.217,21.2501,21.2836,21.3177,21.3522,21.3873,21.423,21.4591,21.4958,21.533,21.5707,21.6091,21.6479,21.6873,21.7273,21.7678,21.8089,21.8505,21.8928,21.9356,21.979,22.023,22.0676,22.1128,22.1586,22.2051,22.2521,22.2998,22.3481,22.397,22.4466,22.4968,22.5477,22.5992,22.6515,22.7043,22.7579,22.8122,22.8671,22.9228,22.9792,23.0363,23.0941,23.1526,23.2119,23.2719,23.3327,23.3943,23.4566,23.5197,23.5836,23.6483,23.7138,23.7802,23.8473,23.9153,23.9842,24.0539,24.1244,24.1959,24.2682,24.3414,24.4156,24.4906,24.5666,24.6435,24.7214,24.8003,24.8801,24.9609,25.0428,25.1256,25.2095,25.2944,25.3804,25.4675,25.5556,25.6449,25.7353,25.8268,25.9194,26.0132,26.1082,26.2044,26.3018,26.4004,26.5003,26.6015,26.7039,26.8077,26.9127,27.0191,27.1269,27.2361,27.3466,27.4586,27.572,27.6869,27.8033,27.9213,28.0407,28.1617
\ No newline at end of file
diff --git a/scilab/test_ceres_odom_plot.sce b/scilab/test_ceres_odom_plot.sce
deleted file mode 100644
index 7ca80e229594d86d8c4067be0bfe7663ccf0147c..0000000000000000000000000000000000000000
--- a/scilab/test_ceres_odom_plot.sce
+++ /dev/null
@@ -1,80 +0,0 @@
-//plot log data from ceres test
-
-// clear all 
-xdel(winsid());
-clear;
-
-// CERES ODOM BATCH
-//load log file
-data = read('~/Desktop/log_file_2.txt',-1,14);
-
-//plot
-fig1 = figure(0);
-fig1.background = 8;
-plot(data(2:$,10),data(2:$,11),"g.");
-plot(data(2:$,1),data(2:$,2),"b-");
-plot(data(2:$,4),data(2:$,5),"r-");
-plot(data(2:$,13),data(2:$,14),"c--");
-
-ah = gca();
-ah.auto_scale = "on";
-ah.x_label.text = "$x [m]$";
-ah.x_label.font_size = 4;
-ah.y_label.text = "$y [m]$";
-ah.y_label.font_size = 4;
-lh =legend(["$GPS$";"$Optimization$";"$Ground\ Truth$";"$ODOM$"],4);
-lh.font_size = 3;
-title(strcat(["CERES_ODOM_BATCH - Time: ",string(data(1,1))," s"]));
-ah.title.font_size = 4;
-
-// MANAGER - THETA
-//load log file
-data2 = read('~/Desktop/log_file_2.txt',-1,15);
-data2L = read('~/Desktop/landmarks_file_2.txt',-1,2);
-
-disp(data2L);
-
-//plot
-fig2 = figure(1);
-fig2.background = 8;
-//plot(data2(2:$,13),data2(2:$,14),"g.");
-plot(data2(2:$,1),data2(2:$,2),"b-");
-plot(data2(2:$,4),data2(2:$,5),"r-");
-plot(data2(2:$,10),data2(2:$,11),"c--");
-
-plot(data2L(1:$,1),data2L(1:$,2),"k.");
-
-ah = gca();
-ah.auto_scale = "on";
-ah.x_label.text = "$x [m]$";
-ah.x_label.font_size = 4;
-ah.y_label.text = "$y [m]$";
-ah.y_label.font_size = 4;
-lh =legend(["$Optimization$";"$Ground\ Truth$";"$ODOM$";"$Landmarks$"],4);
-lh.font_size = 3;                             
-title(strcat(["CERES_MANAGER: Theta - Time: ",string(data2(1,1))," s"]));
-ah.title.font_size = 4;
-
-// MANAGER - COMPLEX ANGLE
-//load log file
-data3 = read('~/Desktop/log_file_3.txt',-1,15);
-
-//plot
-fig3 = figure(2);
-fig3.background = 8;
-plot(data3(2:$,13),data3(2:$,14),"g.");
-plot(data3(2:$,1),data3(2:$,2),"b-");
-plot(data3(2:$,4),data3(2:$,5),"r-");
-plot(data3(2:$,10),data3(2:$,11),"c--");
-
-ah = gca();
-ah.auto_scale = "on";
-ah.x_label.text = "$x [m]$";
-ah.x_label.font_size = 4;
-ah.y_label.text = "$y [m]$";
-ah.y_label.font_size = 4;
-lh =legend(["$GPS$";"$Optimization$";"$Ground\ Truth$";"$ODOM$"],4);
-lh.font_size = 3;                             
-title(strcat(["CERES_MANAGER: Complex Angle - Time: ",string(data3(1,1))," s"]));
-ah.title.font_size = 4;
-
diff --git a/src/capture/capture_image.cpp b/src/capture/capture_image.cpp
index 539e2fe329dd58518d93ef9338421aa9b756bc6d..50483ce754afc88cdb8e293c41b068598b064b9d 100644
--- a/src/capture/capture_image.cpp
+++ b/src/capture/capture_image.cpp
@@ -19,20 +19,35 @@
 // along with this program.  If not, see <http://www.gnu.org/licenses/>.
 //
 //--------LICENSE_END--------
+
+
 #include "vision/capture/capture_image.h"
 
 namespace wolf {
 
-CaptureImage::CaptureImage(const TimeStamp& _ts, SensorCameraPtr _camera_ptr, const cv::Mat& _data_cv) :
+
+size_t WKeyPoint::id_count_ = 0;
+
+WKeyPoint::WKeyPoint():
+    id_(++id_count_)
+{
+}
+
+WKeyPoint::WKeyPoint(const cv::KeyPoint& _cv_kp):
+    id_(++id_count_),
+    cv_kp_(_cv_kp)
+{
+}
+
+
+CaptureImage::CaptureImage(const TimeStamp& _ts, SensorCameraPtr _camera_ptr, const cv::Mat& _img) :
     CaptureBase("CaptureImage", _ts, _camera_ptr),
-    frame_(_data_cv),
-    grid_features_(nullptr),
-    keypoints_(KeyPointVector()),
+    img_(_img),
+    mwkps_(KeyPointsMap()),
     descriptors_(cv::Mat()),
-    matches_from_precedent_(DMatchVector()),
-    matches_normalized_scores_(std::vector<double>()),
-    map_index_to_next_(std::map<int, int>()),
-    global_descriptor_(cv::Mat())
+    tracks_origin_(TracksMap()),
+    tracks_prev_(TracksMap()),
+    last_was_repopulated_(false)
 {
     //
 }
@@ -44,37 +59,57 @@ CaptureImage::~CaptureImage()
 
 const cv::Mat& CaptureImage::getImage() const
 {
-    return frame_.getImage();
+    return img_;
+}
+
+void CaptureImage::setImage(const cv::Mat& _img)
+{
+    // Is assignment enough? Use clone or copyTo instead?
+    img_ = _img;
+}
+
+
+void CaptureImage::addKeyPoint(const WKeyPoint& _wkp)
+{
+    mwkps_.insert(std::pair<size_t, WKeyPoint>(_wkp.getId(), _wkp));
 }
 
-void CaptureImage::setDescriptors(const cv::Mat& _descriptors)
+void CaptureImage::addKeyPoint(const cv::KeyPoint& _cv_kp)
 {
-    frame_.setDescriptors(_descriptors);
+    WKeyPoint wkp(_cv_kp); 
+    addKeyPoint(wkp);
 }
 
-void CaptureImage::setKeypoints(const std::vector<cv::KeyPoint> &_keypoints)
+void CaptureImage::addKeyPoints(const std::vector<WKeyPoint>& _vec_wkp)
 {
-    frame_.setKeyPoints(_keypoints);
+    for (WKeyPoint wkp: _vec_wkp){
+        addKeyPoint(wkp);
+    }
 }
 
-cv::Mat& CaptureImage::getDescriptors()
+void CaptureImage::addKeyPoints(const std::vector<cv::KeyPoint>& _vec_cv_kp)
 {
-    return frame_.getDescriptors();
+    for (auto cv_kp: _vec_cv_kp){
+//        WKeyPoint wkp(cv_kp);
+        addKeyPoint(cv_kp);
+    }
 }
 
-std::vector<cv::KeyPoint>& CaptureImage::getKeypoints()
+void CaptureImage::addKeyPoints(const KeyPointsMap& _mwkps)
 {
-    return frame_.getKeyPoints();
+    for (auto mwkp: _mwkps){
+        addKeyPoint(mwkp.second);
+    }
 }
 
-void CaptureImage::setGlobalDescriptor(const cv::Mat& _global_descriptor)
+void CaptureImage::removeKeyPoint(size_t _id)
 {
-    global_descriptor_ = _global_descriptor;
+    mwkps_.erase(_id);
 }
 
-cv::Mat& CaptureImage::getGlobalDescriptor()
+void CaptureImage::removeKeyPoint(const WKeyPoint& _wkp)
 {
-    return global_descriptor_;
+    mwkps_.erase(_wkp.getId());
 }
 
 } // namespace wolf
diff --git a/src/feature/feature_point_image.cpp b/src/feature/feature_point_image.cpp
index 07918b787dffa1477223ec8fea8c93a0060305bb..317364cfa4f2b678840429865dde664f3080ce79 100644
--- a/src/feature/feature_point_image.cpp
+++ b/src/feature/feature_point_image.cpp
@@ -24,6 +24,16 @@
 
 namespace wolf {
 
+/// Constructor from OpenCV measured keypoint
+FeaturePointImage::FeaturePointImage(const WKeyPoint& _keypoint,
+                                     const Eigen::Matrix2d& _meas_covariance) :
+        FeatureBase("FeaturePointImage", Eigen::Vector2d::Zero(), _meas_covariance),
+        kp_(_keypoint)
+{
+    measurement_(0) = double(_keypoint.getCvKeyPoint().pt.x);
+    measurement_(1) = double(_keypoint.getCvKeyPoint().pt.y);
+}
+
 FeaturePointImage::~FeaturePointImage()
 {
     //
diff --git a/src/landmark/landmark_hp.cpp b/src/landmark/landmark_hp.cpp
index ace4a34939efd8815dc971fa1ba238fd7cac0bcd..62fa68c98a7ecb7021b38ac0d0801ce2a1271933 100644
--- a/src/landmark/landmark_hp.cpp
+++ b/src/landmark/landmark_hp.cpp
@@ -29,7 +29,6 @@ namespace wolf {
 
 /* Landmark - Homogeneous Point*/
 LandmarkHp::LandmarkHp(Eigen::Vector4d _position_homogeneous,
-						 SensorBasePtr _sensor,
                          cv::Mat _2d_descriptor) :
     LandmarkBase("LandmarkHp", std::make_shared<StateHomogeneous3d>(_position_homogeneous)),
     cv_descriptor_(_2d_descriptor.clone())
@@ -69,7 +68,7 @@ LandmarkBasePtr LandmarkHp::create(const YAML::Node& _node)
     std::vector<int>    v           = _node["descriptor"]   .as< std::vector<int> >();
     cv::Mat desc(v);
 
-    LandmarkBasePtr lmk = std::make_shared<LandmarkHp>(pos_homog, nullptr, desc);
+    LandmarkBasePtr lmk = std::make_shared<LandmarkHp>(pos_homog, desc);
     lmk->setId(id);
     return lmk;
 }
diff --git a/src/processor/active_search.cpp b/src/processor/active_search.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..c3929761b46cfca38ef6c6be3c09e8e4c485bc5b
--- /dev/null
+++ b/src/processor/active_search.cpp
@@ -0,0 +1,149 @@
+//--------LICENSE_START--------
+//
+// Copyright (C) 2020,2021,2022 Institut de Robòtica i Informàtica Industrial, CSIC-UPC.
+// Authors: Joan Solà Ortega (jsola@iri.upc.edu)
+// All rights reserved.
+//
+// This file is part of WOLF
+// WOLF is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with this program.  If not, see <http://www.gnu.org/licenses/>.
+//
+//--------LICENSE_END--------
+/**
+ * \file active_search.cpp
+ * \date 10/04/2016
+ * \author jsola, dinesh
+ */
+
+#include "vision/processor/active_search.h"
+
+#include <iostream>
+
+namespace wolf{
+
+ActiveSearchGrid::ActiveSearchGrid() : separation_(0), margin_(0) {}
+
+ActiveSearchGrid::ActiveSearchGrid(const int & _img_size_h, const int & _img_size_v, const int & _n_cells_h,
+                                   const int & _n_cells_v, const int & _margin, const int & _separation)
+{
+    setup(_img_size_h, _img_size_v, _n_cells_h, _n_cells_v, _margin, _separation);
+}
+
+void ActiveSearchGrid::setup(const int & _img_size_h, const int & _img_size_v,
+                   const int & _n_cells_h, const int & _n_cells_v,
+                   const int & _margin, const int & _separation)
+    {
+    separation_ = _separation;
+    margin_ = _margin;
+
+    projections_count_.resize(_n_cells_h + 1, _n_cells_v + 1);
+    empty_cells_tile_tmp_.resize(2, (_n_cells_h + 1) * (_n_cells_v + 1));
+    img_size_(0) = _img_size_h;
+    img_size_(1) = _img_size_v;
+    grid_size_(0) = _n_cells_h + 1;
+    grid_size_(1) = _n_cells_v + 1;
+    cell_size_(0) = _img_size_h / _n_cells_h;
+    cell_size_(1) = _img_size_v / _n_cells_v;
+    offset_ = -cell_size_ / 2;
+    renew();
+}
+
+void ActiveSearchGrid::resizeImage(unsigned int _img_size_h, unsigned int _img_size_v)
+{
+    img_size_(0) = _img_size_h;
+    img_size_(1) = _img_size_v;
+    cell_size_(0) = _img_size_h / (grid_size_(0) - 1);
+    cell_size_(1) = _img_size_v / (grid_size_(1) - 1);
+    offset_ = -cell_size_ / 2;
+    renew();
+}
+
+
+bool ActiveSearchGrid::pickEmptyCell(Eigen::Vector2i & _cell) {
+    int k = 0;
+    Eigen::Vector2i cell0;
+    for (int i = 1; i < grid_size_(0) - 1; i++) {
+        for (int j = 1; j < grid_size_(1) - 1; j++) {
+            cell0(0) = i;
+            cell0(1) = j;
+            if (projections_count_(i, j) == 0) // cell in empty AND not blocked
+            {
+                empty_cells_tile_tmp_(0,k) = i; //may be done in a better way
+                empty_cells_tile_tmp_(1,k) = j;
+                k++;
+            }
+        }
+    }
+    if (k > 0) { // number of empty inner cells
+        int idx = rand() % k; // between 0 and k-1
+        _cell(0) = empty_cells_tile_tmp_(0, idx);
+        _cell(1) = empty_cells_tile_tmp_(1, idx);
+        return true;
+    }
+    else
+        return false;
+}
+
+Eigen::Vector2i ActiveSearchGrid::cellOrigin(const Eigen::Vector2i & _cell) {
+    Eigen::Vector2i cell0;
+    cell0(0) = offset_(0) + cell_size_(0) * _cell(0);
+    cell0(1) = offset_(1) + cell_size_(1) * _cell(1);
+    return cell0;
+}
+
+void ActiveSearchGrid::cell2roi(const Eigen::Vector2i & _cell, cv::Rect & _roi) {
+    roi_coordinates_ = cellOrigin(_cell);
+    roi_coordinates_(0) += separation_;
+    roi_coordinates_(1) += separation_;
+    Eigen::Vector2i roi_size = cell_size_;
+    roi_size(0) -= 2 * separation_;
+    roi_size(1) -= 2 * separation_;
+    _roi.x = roi_coordinates_(0);
+    _roi.y = roi_coordinates_(1);
+    _roi.width = roi_size(0);
+    _roi.height = roi_size(1);
+}
+
+
+bool ActiveSearchGrid::pickRoi(cv::Rect & _roi) {
+
+    Eigen::Vector2i cell;
+    if (pickEmptyCell(cell)) {
+        cell2roi(cell, _roi);
+        return true;
+    }
+    else
+        return false;
+}
+
+void ActiveSearchGrid::blockCell(const Eigen::Vector2i & _cell)
+{
+    projections_count_(_cell(0), _cell(1)) = -1;
+}
+
+void ActiveSearchGrid::blockCell(const cv::Rect & _roi)
+{
+    Eigen::Vector2i pix;
+    pix(0) = _roi.x+_roi.width/2;
+    pix(1) = _roi.y+_roi.height/2;
+    Eigen::Vector2i cell = coords2cell(pix(0), pix(1));
+    blockCell(cell);
+}
+
+bool ActiveSearchGrid::isCellBlocked (const Eigen::Vector2i& _cell)
+{
+    return projections_count_(_cell(0), _cell(1)) < 0;
+}
+
+
+}
diff --git a/src/processor/processor_bundle_adjustment.cpp b/src/processor/processor_bundle_adjustment.cpp
deleted file mode 100644
index 4486c0e713ac4c19a0004259f0d36a4a3b533ea2..0000000000000000000000000000000000000000
--- a/src/processor/processor_bundle_adjustment.cpp
+++ /dev/null
@@ -1,824 +0,0 @@
-//--------LICENSE_START--------
-//
-// Copyright (C) 2020,2021,2022 Institut de Robòtica i Informàtica Industrial, CSIC-UPC.
-// Authors: Joan Solà Ortega (jsola@iri.upc.edu)
-// All rights reserved.
-//
-// This file is part of WOLF
-// WOLF is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with this program.  If not, see <http://www.gnu.org/licenses/>.
-//
-//--------LICENSE_END--------
-/*
- * processor_bundle_adjustment.cpp
- *
- *  Created on: May 3, 2019
- *      Author: ovendrell
- */
-
-//wolf
-#include "vision_utils/vision_utils.h"
-#include "vision_utils/detectors.h"
-#include "vision_utils/descriptors.h"
-#include "vision_utils/matchers.h"
-
-//standard
-#include <memory>
-#include "vision/processor/processor_bundle_adjustment.h"
-
-namespace wolf{
-
-ProcessorBundleAdjustment::ProcessorBundleAdjustment(ParamsProcessorBundleAdjustmentPtr _params_bundle_adjustment) :
-                ProcessorTrackerFeature("ProcessorBundleAdjustment", "PO", 3, _params_bundle_adjustment),
-                params_bundle_adjustment_(_params_bundle_adjustment),
-                capture_image_last_(nullptr),
-                capture_image_incoming_(nullptr),
-                frame_count_(0)
-{
-	//Initialize detector-descriptor-matcher
-	pixel_cov_ = Eigen::Matrix2d::Identity() * params_bundle_adjustment_->pixel_noise_std * params_bundle_adjustment_->pixel_noise_std;
-
-    // Detector yaml file
-    std::string det_name = vision_utils::readYamlType(params_bundle_adjustment_->yaml_file_params_vision_utils, "detector");
-    // Create Detector
-    det_ptr_ = vision_utils::setupDetector(det_name, det_name + " detector", params_bundle_adjustment_->yaml_file_params_vision_utils);
-
-    // Descriptor yaml file
-    std::string des_name = vision_utils::readYamlType(params_bundle_adjustment_->yaml_file_params_vision_utils, "descriptor");
-    // Create Descriptor
-    des_ptr_ = vision_utils::setupDescriptor(des_name, des_name + " descriptor", params_bundle_adjustment_->yaml_file_params_vision_utils);
-
-    // Matcher yaml file
-    std::string mat_name = vision_utils::readYamlType(params_bundle_adjustment_->yaml_file_params_vision_utils, "matcher");
-    // Create Matcher
-    mat_ptr_ = vision_utils::setupMatcher(mat_name, mat_name + " matcher", params_bundle_adjustment_->yaml_file_params_vision_utils);
-
-    //Initialize rvec and tvec
-    tvec_ = cv::Mat::zeros(cv::Size(3,1), CV_32F);
-    rvec_ = cv::Mat::zeros(cv::Size(3,1), CV_32F);
-//    Eigen::Vector3d last_pos = capture_image_last_->getFrame()->getP()->getState();
-//    Eigen::Vector4d last_or = capture_image_last_->getFrame()->getO()->getState();
-//    Eigen::Quaternion<double> last_q(last_or(0), last_or(1), last_or(2), last_or(3));
-//
-//    Eigen::VectorXd tvec_eigen = last_pos;
-//    Eigen::VectorXd rvec_eigen = q2v(last_q);
-//
-//    cv::eigen2cv(tvec_eigen, tvec);
-//    cv::eigen2cv(rvec_eigen, rvec);
-
-
-}
-
-void ProcessorBundleAdjustment::configure(SensorBasePtr _sensor)
-{
-    //TODO: Implement if needed
-	//Initialize camera sensor pointer
-	camera = std::static_pointer_cast<SensorCamera>(_sensor);
-
-}
-
-void ProcessorBundleAdjustment::preProcess()
-{
-    // This method implements all Vision algorithms concerning OpenCV, so wolf algorithm only has to manage the data obtained
-    // Get Capture
-    capture_image_incoming_ = std::dynamic_pointer_cast<CaptureImage>(incoming_ptr_);
-    assert(capture_image_incoming_ != nullptr && ("Capture type mismatch. Processor " + getName() + " can only process captures of type CaptureImage").c_str());
-    // capture_image_incoming_ = std::static_pointer_cast<CaptureImage>(incoming_ptr_);
-
-    // Detect KeyPoints
-    capture_image_incoming_->keypoints_ = det_ptr_->detect(capture_image_incoming_->getImage());
-
-    //Sort keypoints by response if needed
-    if (!capture_image_incoming_->keypoints_.empty())
-    	vision_utils::sortByResponse(capture_image_incoming_->keypoints_, CV_SORT_DESCENDING);
-
-    // Compute Descriptors
-    capture_image_incoming_->descriptors_ = des_ptr_->getDescriptor(capture_image_incoming_->getImage(), capture_image_incoming_->keypoints_);
-
-    // Create and fill incoming grid
-    capture_image_incoming_->grid_features_ = std::make_shared<vision_utils::FeatureIdxGrid>(capture_image_incoming_->getImage().rows, capture_image_incoming_->getImage().cols, params_bundle_adjustment_->n_cells_v, params_bundle_adjustment_->n_cells_h);
-
-    capture_image_incoming_->grid_features_->insert(capture_image_incoming_->keypoints_);
-
-    if (last_ptr_ != nullptr)
-    {
-        // Get Capture
-        capture_image_last_ = std::static_pointer_cast<CaptureImage>(last_ptr_);
-        // Match image
-        capture_image_incoming_->matches_normalized_scores_ = mat_ptr_->robustMatch(capture_image_incoming_->keypoints_, capture_image_last_->keypoints_,
-                                                                                    capture_image_incoming_->descriptors_, capture_image_last_->descriptors_,
-                                                                                    des_ptr_->getSize(), capture_image_incoming_->matches_from_precedent_);
-
-
-        //TODO: Get only the best ones
-        if (params_bundle_adjustment_->delete_ambiguities) //filter ambiguities
-        {
-        	std::map<int,bool> ambiguities_map;
-        	for (auto match : capture_image_incoming_->matches_from_precedent_)
-        		if (ambiguities_map.count(match.trainIdx)) //if ambiguity
-        			ambiguities_map[match.trainIdx] = true; //ambiguity true
-        		else //if not ambiguity
-        			ambiguities_map[match.trainIdx] = false; //ambiguity false
-        	// Set capture map of match indices
-        	for (auto match : capture_image_incoming_->matches_from_precedent_)
-        	{
-        		if (!ambiguities_map.at(match.trainIdx))
-        			capture_image_last_->map_index_to_next_[match.trainIdx] = match.queryIdx; // map[last] = incoming
-        	}
-        }
-        else
-        {
-        	// Set capture map of match indices
-        	for (auto match : capture_image_incoming_->matches_from_precedent_)
-        		capture_image_last_->map_index_to_next_[match.trainIdx] = match.queryIdx; // map[last] = incoming
-
-        }
-
-
-//        //get points 2d from inlier matches
-//        PointVector pts1, pts2;
-//        for (auto match : capture_image_incoming_->matches_from_precedent_)
-//        {
-//        	pts1.push_back(capture_image_incoming_->keypoints_[match.queryIdx].pt);
-//        	pts2.push_back(capture_image_last_->keypoints_[match.trainIdx].pt);
-//        }
-//
-//        //find Essential Matrix
-//        std::vector<uchar> inliers(pts1.size(),0);
-//
-//        auto camera_mat = std::static_pointer_cast<SensorCamera>(capture_image_last_->getSensor())->getIntrinsicMatrix();
-//        cv::Mat camera_mat_cv;
-//        cv::eigen2cv(camera_mat, camera_mat_cv);
-//
-//        cv::Mat E = cv::findEssentialMat(pts1,
-//        					pts2,
-//							camera_mat_cv,
-//							CV_RANSAC,
-//							0.999,
-//							1.0,
-//							inliers);
-//
-//        //Compute rotation R and translation t from E
-//        cv::Mat R_cv;
-//        cv::Mat t_cv;
-//
-//        cv::recoverPose(E,pts1,pts2, camera_mat_cv, R_cv, t_cv, inliers);
-//
-//        //Convert R and t from OpenCV type to Eigen type and convert Rotation to a quaternion
-//        Eigen::Matrix3d R;
-//        cv::cv2eigen(R_cv, R);
-////        auto q = R2q(R); //Quaternion q
-//
-//        Eigen::Vector3d t; //Translation t
-//        cv::cv2eigen(t_cv,t);
-
-
-
-
-
-
-
-//        cv::Mat img_mat = cv::Mat(capture_image_incoming_->getImage().rows, capture_image_incoming_->getImage().cols, capture_image_incoming_->getImage().type());
-//
-//        cv::drawMatches(capture_image_incoming_->getImage(), 		 capture_image_incoming_->getKeypoints(),
-//        				capture_image_last_->getImage(),     capture_image_last_->getKeypoints(),
-//						capture_image_incoming_->matches_from_precedent_, img_mat);
-//
-//        cv::namedWindow("MATCHES VIEW", cv::WINDOW_NORMAL);
-//        cv::resizeWindow("MATCHES VIEW", 800,600);
-//        cv::imshow("MATCHES VIEW", img_mat);
-    }
-
-//    std::cout << "preProcess() done " << std::endl;
-
-}
-
-void ProcessorBundleAdjustment::postProcess()
-{
-
-    WOLF_INFO("last ", last_ptr_, "camera ", camera);
-
-    //=====================================Compute prior for the frame===============================================
-//    if (capture_image_last_)
-//	{
-//
-//		std::vector<cv::Point3f> landmark_points;
-//		std::vector<cv::Point2f> image_points;
-//
-//		//get camera information
-//		auto camera_mat = camera->getIntrinsicMatrix();
-//
-//		cv::Mat camera_mat_cv;
-//		cv::eigen2cv(camera_mat, camera_mat_cv);
-//
-//		auto dist_coeffs = camera->getDistortionVector();
-//
-//		Eigen::Vector5d dist;
-//		switch(dist_coeffs.size())
-//		{
-//		case 0:
-//		{
-//			dist << 0,0,0,0,0;
-//			break;
-//		}
-//		case 1:
-//		{
-//			dist << dist_coeffs(0),0,0,0,0;
-//			break;
-//		}
-//		case 2:
-//		{
-//			dist << dist_coeffs(0),dist_coeffs(1),0,0,0;
-//			break;
-//		}
-//		case 3:
-//		{
-//			dist << dist_coeffs(0),dist_coeffs(1),0,0,dist_coeffs(2);
-//			break;
-//		}
-//		default:
-//			dist << 0,0,0,0,0;
-//		}
-//
-//		cv::Mat dist_coeffs_cv;
-//		cv::eigen2cv(dist, dist_coeffs_cv);
-//
-//
-//
-//		//fill points and landmarks list
-//		int debug_counter = 0;
-//
-////		for (auto & feat : capture_image_last_->getFeatureList())
-////		{
-////			for (auto & fact : feat->getFactorList())
-////			{
-////				debug_counter++;
-////				//check the number of factors to the landmark
-////				if (fact->getLandmarkOther()->getConstrainedByList().size() >= 2)
-////				{
-////					//3d points
-////					auto point3d = std::static_pointer_cast<LandmarkHp>(fact->getLandmarkOther())->point();
-////					landmark_points.push_back(cv::Point3f(point3d(0),point3d(1),point3d(2)));
-////					//2d points
-////					auto point2d = feat->getMeasurement();
-////					image_points.push_back(cv::Point2f(point2d(0), point2d(1)));
-////				}
-////			}
-////		}
-//
-//		for (auto & feat : track_matrix_.snapshotAsList(capture_image_last_))
-//		{
-//			debug_counter++;
-//			auto trkId = feat->trackId();
-//			if (lmk_track_map_.count(feat->trackId()))
-//			{
-//				auto lmk_base = lmk_track_map_[trkId];
-////				if (lmk_base->getConstrainedByList().size() >= 2)
-//				{
-//					//3d points
-//					auto point3d = std::static_pointer_cast<LandmarkHp>(lmk_base)->point();
-//					landmark_points.push_back(cv::Point3f(point3d(0),point3d(1),point3d(2)));
-//					//2d points
-//					auto point2d = feat->getMeasurement();
-//					image_points.push_back(cv::Point2f(point2d(0), point2d(1)));
-//				}
-//			}
-//		}
-//
-//		WOLF_INFO("Num lmks in last:", debug_counter,"lmks constrained by >= 2 fctrs: ", landmark_points.size());
-//
-//		//solvePnP
-//		if (landmark_points.size() > 7)
-//		{
-//            WOLF_INFO("before PnP ", tvec, rvec);
-//            WOLF_INFO("before PnP ", last_ptr_->getFrame()->getState().transpose());
-//
-//
-//            cv::solvePnP(landmark_points, image_points, camera_mat_cv, dist_coeffs_cv, rvec, tvec, true);
-//		    WOLF_INFO("Solve PnP Done")
-//
-//		    //Compute and set Robot state
-//		    //q_w_s Camera quaternion
-//		    Eigen::Vector3d rvec_eigen;
-//		    cv::cv2eigen(rvec, rvec_eigen);
-//		    Eigen::Quaternion<double> q_w_s = v2q(rvec_eigen);
-//
-//		    //p_w_s Camera position
-//		    Eigen::Vector3d p_w_s;
-//		    cv::cv2eigen(tvec, p_w_s);
-//
-//		    //Robot pose
-//		    //	Eigen::Vector3d p_w_r = capture_image_last_->getFrame()->getP()->getState();
-//		    //	Eigen::Quaternion<double> q_w_r = Quaterniond(capture_image_last_->getFrame()->getO()->getState().data());
-//
-//		    //Extrinsics (camera in robot reference frame)
-//		    Eigen::Vector3d p_r_s = camera->getP()->getState();
-//		    Eigen::Quaternion<double> q_r_s = Quaterniond(camera->getO()->getState().data());
-//
-//		    //Robot pose compositions
-//		    Eigen::Quaternion<double> q_w_r = q_w_s * q_r_s.conjugate();
-//		    Eigen::Vector3d p_w_r = p_w_s - q_w_r * p_r_s;
-//
-//
-//
-//		    Eigen::Vector7d prior_state;
-//		    prior_state << p_w_r(0), p_w_r(1), p_w_r(2), q_w_r.x(), q_w_r.y(), q_w_r.z(), q_w_r.w();
-//
-//		    last_ptr_->getFrame()->setState(prior_state);
-//
-//            WOLF_INFO("after PnP  ", last_ptr_->getFrame()->getState().transpose());
-//            WOLF_INFO("after PnP  ", tvec, rvec);
-//
-//		}
-//
-//		//=================================================================================================================
-//	}
-
-    //delete landmarks
-    std::list<LandmarkBasePtr> lmks_to_remove;
-    for (auto & lmk : getProblem()->getMap()->getLandmarkList())
-    {
-        if (lmk->getConstrainedByList().size()==1)
-        {
-            if (lmk->getConstrainedByList().front()->getFeature()->getCapture() != origin_ptr_)
-            {
-                WOLF_INFO("Removing landmark: ", lmk->id());
-                lmk_track_map_.erase(lmk->getConstrainedByList().front()->getFeature()->trackId());
-                WOLF_INFO("Lmk deleted from track map: ", lmk->id());
-                lmks_to_remove.push_back(lmk);
-           }
-        }
-    }
-    for (auto & lmk : lmks_to_remove)
-    {
-        lmk->remove();
-        WOLF_INFO("Lmk deleted: ", lmk->id());
-    }
-    getProblem()->check(0);
-
-
-    // draw debug image ======================================================================================
-
-    // image to draw
-    CaptureBasePtr image_to_draw = last_ptr_;
-
-
-    std::map<int,std::list<vision_utils::KeyPointEnhanced> > kp_enh_tracks;
-    for (auto const & feat_base : image_to_draw->getFeatureList())
-    {
-        FeaturePointImagePtr feat = std::static_pointer_cast<FeaturePointImage>(feat_base);
-        unsigned int feat_id = feat->id();
-        unsigned int track_id = feat->trackId();
-        // tracks
-        std::list<vision_utils::KeyPointEnhanced> kp_enh_track;
-        for (auto feat_base_track : track_matrix_.trackAsVector(track_id))
-        {
-            FeaturePointImagePtr feat_img = std::static_pointer_cast<FeaturePointImage>(feat_base_track);
-            vision_utils::KeyPointEnhanced kp_enh(feat_img->getKeypoint(),
-                                                  feat_id,
-                                                  track_id,
-                                                  track_matrix_.trackSize(track_id),
-                                                  feat_img->getMeasurementCovariance());
-            kp_enh_track.push_back(kp_enh);
-        }
-
-        kp_enh_tracks[feat_id] = kp_enh_track;
-    }
-    // DEBUG
-    image_debug_ = vision_utils::buildImageProcessed((std::static_pointer_cast<CaptureImage>(image_to_draw))->getImage(), kp_enh_tracks);
-//    Snapshot snapshot_last = track_matrix_.snapshot(last_ptr_);
-////	getProblem()->print(4,1,1,0);
-//    for (auto pair : track_matrix_.snapshot(origin_ptr_))
-//    {
-//    	if (snapshot_last.count(pair.first)==0)
-//    	{
-//    		if (!(pair.second->getFactorList()).empty())
-//    		{
-//				auto factor = pair.second->getFactorList().front();
-//				if (factor)
-//				{
-//					auto lmk = factor->getLandmarkOther();
-//					if (lmk)
-//					{
-////						lmk->remove();
-////	    	    		track_matrix_.remove(pair.second->trackId());
-//					}
-//				}
-//    		}
-//    	}
-//    }
-//    getProblem()->print(0,0,1,0);
-
-    list<FeatureBasePtr> snapshot = track_matrix_.snapshotAsList(image_to_draw);
-
-    for (auto const & feat_base : snapshot)
-    {
-        FeaturePointImagePtr feat = std::static_pointer_cast<FeaturePointImage>(feat_base);
-        unsigned int track_id = feat->trackId();
-		if (lmk_track_map_.count(track_id))
-        {
-			Vector3d point3d = std::static_pointer_cast<LandmarkHp>(lmk_track_map_[track_id])->point();
-
-			Transform<double,3,Isometry> T_w_r
-		        = Translation<double,3>(feat_base->getFrame()->getP()->getState())
-		        * Quaterniond(feat_base->getFrame()->getO()->getState().data());
-		    Transform<double,3,Isometry> T_r_c
-				= Translation<double,3>(feat_base->getCapture()->getSensorP()->getState())
-		        * Quaterniond(feat_base->getCapture()->getSensorO()->getState().data());
-
-		    Eigen::Matrix<double, 3, 1> point3d_c = T_r_c.inverse()
-												   * T_w_r.inverse()
-		                                           * point3d;
-
-//		    SensorCameraPtr camera = std::static_pointer_cast<SensorCamera>(getSensor());
-
-		    Vector2d point2d = pinhole::projectPoint(getSensor()->getIntrinsic()->getState(), camera->getDistortionVector(), point3d_c);
-    		cv::Point point = cv::Point(point2d(0), point2d(1));
-
-    		cv::circle(image_debug_, point, 3, cv::Scalar(0,0,255) , 1 , 8);
-    		std::string id = std::to_string(lmk_track_map_[track_id]->id());
-    		cv::putText(image_debug_, id, point, cv::FONT_HERSHEY_DUPLEX, 0.5, CV_RGB(0,255,0),2);
-		}
-	}
-
-    // frame counter for voting for KF --- if not used in voteForKf, this can be removed.
-    frame_count_ ++;
-
-//    cv::namedWindow("DEBUG VIEW", cv::WINDOW_NORMAL);
-//    cv::resizeWindow("DEBUG VIEW", 800,600);
-//    cv::imshow("DEBUG VIEW", image_debug_);
-//    cv::waitKey(1);
-}
-
-
-unsigned int ProcessorBundleAdjustment::trackFeatures(const FeatureBasePtrList& _features_in,
-                                                      const CaptureBasePtr& _capture,
-                                                      FeatureBasePtrList& _features_out,
-                                                      FeatureMatchMap& _feature_correspondences)
-{
-    for (auto feat_base: _features_in)
-    {
-        FeaturePointImagePtr feat_last = std::static_pointer_cast<FeaturePointImage>(feat_base);
-
-        if (capture_image_last_->map_index_to_next_.count(feat_last->getIndexKeyPoint())) //If the track to incoming exists
-        {
-            int index_inc = capture_image_last_->map_index_to_next_.at(feat_last->getIndexKeyPoint());
-
-            if (true)//capture_image_incoming_->matches_normalized_scores_.at(index_inc) > mat_ptr_->getParams()->min_norm_score )
-            {
-                // Get kp incoming and keypoint last
-                cv::KeyPoint kp_inc = capture_image_incoming_->keypoints_.at(index_inc);
-                cv::KeyPoint kp_last = capture_image_last_->keypoints_.at(feat_last->getIndexKeyPoint());
-
-                if (isInlier(kp_last, kp_inc))
-                {
-					FeatureBasePtr feat_inc = FeatureBase::emplace<FeaturePointImage>(_capture,
-					                                                                  kp_inc,
-					                                                                  index_inc,
-					                                                                  capture_image_incoming_->descriptors_.row(index_inc),
-					                                                                  pixel_cov_);
-
-					_features_out.push_back(feat_inc);
-
-					auto feature_match_ptr = std::make_shared<FeatureMatch>();
-					feature_match_ptr->feature_ptr_= feat_last;
-					feature_match_ptr->normalized_score_ = 1.0;//capture_image_incoming_->matches_normalized_scores_.at(index_inc);
-					_feature_correspondences[feat_inc] = feature_match_ptr;
-
-
-					// hit cell to acknowledge there's a tracked point in that cell
-					capture_image_incoming_->grid_features_->hitTrackingCell(kp_inc);
-                }
-            }
-        }
-    }
-//    return _feature_correspondences.size();
-    return _features_out.size();
-}
-
-bool ProcessorBundleAdjustment::isInlier(const cv::KeyPoint& _kp_last, const cv::KeyPoint& _kp_incoming) const
-{
-    // List of conditions
-    bool inlier = true;
-
-    // A. Check euclidean norm
-    inlier = inlier && (cv::norm(_kp_incoming.pt-_kp_last.pt) < mat_ptr_->getParams()->max_match_euclidean_dist);
-
-    // B. add your new condition test here
-    // inlier = inlier && ...
-
-    return inlier;
-}
-
-bool ProcessorBundleAdjustment::is_tracked(int& _kp_idx) const
-{
-
-    for (auto ftr : known_features_incoming_)
-    {
-        FeaturePointImagePtr ftr_kp = std::static_pointer_cast<FeaturePointImage>(ftr);
-        if (ftr_kp->getIndexKeyPoint() == _kp_idx)
-        {
-            return true;
-        }
-    }
-    return false;
-}
-
-unsigned int ProcessorBundleAdjustment::detectNewFeatures(const int& _max_new_features,
-                                                          const CaptureBasePtr& _capture,
-                                                          FeatureBasePtrList& _features_out)
-{
-    //TODO: efficient implementation?
-
-////Simpler method to detect new features without using Grid Features form vision_utils
-//    typedef std::map<int,int>::iterator iter;
-//
-//    for (iter it = capture_image_last_->map_index_to_next_.begin(); it!=capture_image_last_->map_index_to_next_.end(); ++it)
-//    {
-//        if (_features_out.size() >= _max_new_features)
-//        		break;
-//
-//        else if(!is_tracked(it->second))
-//        {
-//            //add to _features_out
-//            int idx_last = it->first;
-//            cv::KeyPoint kp_last = capture_image_last_->keypoints_.at(idx_last);
-//            FeaturePointImagePtr feat_last_ = std::make_shared<FeaturePointImage>(kp_last, idx_last,
-//                                                                                 capture_image_last_->descriptors_.row(idx_last),
-//                                                                                 pixel_cov_);
-//            _features_out.push_back(feat_last_);
-//
-//        }
-//    }
-//
-//    return _features_out.size();
-
-	for (unsigned int n_iterations = 0; _max_new_features == -1 || n_iterations < _max_new_features; ++n_iterations)
-	{
-		Eigen::Vector2i cell_last;
-		assert(capture_image_last_->grid_features_!=nullptr);
-		if (capture_image_last_->grid_features_->pickEmptyTrackingCell(cell_last))
-		{
-			// Get best keypoint in cell
-			vision_utils::FeatureIdxMap cell_feat_map = capture_image_last_->grid_features_->getFeatureIdxMap(cell_last(0), cell_last(1), params_bundle_adjustment_->min_response_new_feature);
-			bool found_feature_in_cell = false;
-
-			for (auto target_last_pair_response_idx : cell_feat_map)
-			{
-				// Get KeyPoint in last
-				unsigned int index_last = target_last_pair_response_idx.second;
-				cv::KeyPoint kp_last = capture_image_last_->keypoints_.at(index_last);
-				assert(target_last_pair_response_idx.first == kp_last.response && "[ProcessorTrackerFeatureTrifocal::detectNewFeatures]: response mismatch.");
-
-				// Check if there is match with incoming, if not we do not want it
-				if (capture_image_last_->map_index_to_next_.count(index_last))
-				{
-					// matching keypoint in incoming
-                  	unsigned int index_incoming = capture_image_last_->map_index_to_next_[index_last];
-                    cv::KeyPoint kp_incoming = capture_image_incoming_->keypoints_.at(index_incoming);
-
-					// validate match with extra tests
-					if (isInlier( kp_incoming, kp_last))
-					{
-						// Create WOLF feature
-						FeatureBasePtr ftr_point_last = FeatureBase::emplace<FeaturePointImage>(_capture,
-                                                                                                kp_last,
-                                                                                                index_last,
-                                                                                                capture_image_last_->descriptors_.row(index_last),
-                                                                                                pixel_cov_);
-
-						_features_out.push_back(ftr_point_last);
-
-						// hit cell to acknowledge there's a tracked point in that cell
-						capture_image_last_->grid_features_->hitTrackingCell(kp_last);
-
-						found_feature_in_cell = true;
-
-						break; // Good kp found for this grid. //Use to have only one keypoint per grid
-                    }
-				}
-			}
-			if (!found_feature_in_cell)
-				capture_image_last_->grid_features_->blockTrackingCell(cell_last);
-		}
-		else
-			break; // There are no empty cells
-	}
-	return _features_out.size();
-}
-
-
-bool ProcessorBundleAdjustment::correctFeatureDrift(const FeatureBasePtr _origin_feature, const FeatureBasePtr _last_feature, FeatureBasePtr _incoming_feature)
-{
-    //TODO: Implement if needed
-    return true;
-}
-
-bool ProcessorBundleAdjustment::voteForKeyFrame() const
-{
-
-
-
-    return ((frame_count_ % 5) == 0);
-
-
-
-
-
-
-	//    // A. crossing voting threshold with ascending number of features
-	    bool vote_up = false;
-	//    // 1. vote if we did not have enough features before
-	//    vote_up = vote_up && (previousNumberOfTracks() < params_bundle_adjustment_->min_features_for_keyframe);
-	//    // 2. vote if we have enough features now
-	//    vote_up = vote_up && (incoming_ptr_->getFeatureList().size() >= params_bundle_adjustment_->min_features_for_keyframe);
-
-	    // B. crossing voting threshold with descending number of features
-	    bool vote_down = true;
-	    // 1. vote if we had enough features before
-	//    vote_down = vote_down && (last_ptr_->getFeatureList().size() >= params_bundle_adjustment_->min_features_for_keyframe);
-	    // 2. vote if we have not enough features now
-	    vote_down = vote_down && (incoming_ptr_->getFeatureList().size() < params_bundle_adjustment_->min_features_for_keyframe);
-
-	//    // C. Time-based policies
-	    bool vote_time = false;
-	////    vote_time = vote_time || (incoming_ptr_->getTimeStamp()-origin_ptr_->getTimeStamp() > 1.0);
-	//
-	//    if (vote_up)
-	//        WOLF_TRACE("VOTE UP");
-	//    if (vote_down)
-	//        WOLF_TRACE("VOTE DOWN");
-	//    if (vote_time)
-	//        WOLF_TRACE("VOTE TIME");
-	    if (vote_down)
-	    	WOLF_INFO("Creating KF. Number of features: ", incoming_ptr_->getFeatureList().size());
-
-	    return vote_up || vote_down || vote_time;
-}
-
-
-FactorBasePtr ProcessorBundleAdjustment::emplaceFactor(FeatureBasePtr _feature_ptr, FeatureBasePtr _feature_other_ptr)
-{
-    /* This function is no creating any factor.
-     *  We create factors with establishFactors()
-     */
-    return FactorBasePtr();
-}
-
-LandmarkBasePtr ProcessorBundleAdjustment::emplaceLandmark(FeatureBasePtr _feature_ptr)
-{
-    FeaturePointImagePtr feat_point_image_ptr = std::static_pointer_cast<FeaturePointImage>( _feature_ptr);
-    Eigen::Vector2d point2d = _feature_ptr->getMeasurement();
-
-    Eigen::Vector3d point3d;
-    point3d = pinhole::backprojectPoint(
-    		getSensor()->getIntrinsic()->getState(),
-    		(std::static_pointer_cast<SensorCamera>(getSensor()))->getCorrectionVector(),
-			point2d);
-
-
-
-    //double distance = params_bundle_adjustment_->distance; // arbitrary value
-    double distance = 1;
-    Eigen::Vector4d vec_homogeneous_c;
-    vec_homogeneous_c = {point3d(0),point3d(1),point3d(2),point3d.norm()/distance};
-    vec_homogeneous_c.normalize();
-
-    //TODO: lmk from camera to world coordinate frame.
-    Transform<double,3,Isometry> T_w_r
-        = Translation<double,3>(_feature_ptr->getFrame()->getP()->getState())
-        * Quaterniond(_feature_ptr->getFrame()->getO()->getState().data());
-    Transform<double,3,Isometry> T_r_c
-		= Translation<double,3>(_feature_ptr->getCapture()->getSensorP()->getState())
-        * Quaterniond(_feature_ptr->getCapture()->getSensorO()->getState().data());
-    Eigen::Matrix<double, 4, 1> vec_homogeneous_w = T_w_r
-                                           * T_r_c
-                                           * vec_homogeneous_c;
-
-
-//    std::cout << "Point2d: " << point2d.transpose() << std::endl;
-//    std::cout << "Point3d: " << point3d.transpose() << std::endl;
-//    std::cout << "Hmg_c: " << vec_homogeneous.transpose() << std::endl;
-//    std::cout << "Hmg_w: " << vec_homogeneous_w.transpose() << std::endl;
-    //vec_homogeneous_w = vec_homogeneous;
-
-    LandmarkBasePtr lmk_hp_ptr = LandmarkBase::emplace<LandmarkHp>(getProblem()->getMap(), vec_homogeneous_w, getSensor(), feat_point_image_ptr->getDescriptor());
-
-//    std::cout << "LANDMARKS CREATED AND ADDED to MAP: " << getProblem()->getMap()->getLandmarkList().size() << std::endl;
-
-    _feature_ptr->setLandmarkId(lmk_hp_ptr->id());
-    return lmk_hp_ptr;
-}
-
-void ProcessorBundleAdjustment::establishFactors()
-{
-
-//	if (origin_ptr_ == last_ptr_)
-//		return;
-//
-//	TrackMatches matches_origin_inc = track_matrix_.matches(origin_ptr_, last_ptr_);
-//
-//    for (auto const & pair_trkid_pair : matches_origin_inc)
-//    {
-//        size_t trkid = pair_trkid_pair.first;
-//        //if track size is lower than a minimum, don't establish any factor.
-//        if (track_matrix_.trackSize(trkid) < params_bundle_adjustment_->min_track_length_for_factor)
-//        	continue;
-//
-//        FeatureBasePtr feature_origin = pair_trkid_pair.second.first;
-//        FeatureBasePtr feature_last   = pair_trkid_pair.second.second;
-//
-//        if (lmk_track_map_.count(trkid)==0) //if the track doesn't have landmark associated -> we need a map: map[track_id] = landmarkptr
-//        {
-//        	//emplaceLandmark
-//        	LandmarkBasePtr lmk = emplaceLandmark(feature_origin);
-//        	LandmarkHpPtr lmk_hp = std::static_pointer_cast<LandmarkHp>(lmk);
-//
-//        	//add only one Landmark to map: map[track_id] = landmarkptr
-//        	lmk_track_map_[trkid] = lmk;
-//
-//        	//emplace a factor for each feature in the track (only for keyframes)
-//        	Track full_track = track_matrix_.trackAtKeyframes(trkid);
-//        	for (auto it=full_track.begin(); it!=full_track.end(); ++it)
-//        	{
-//        		FactorBase::emplace<FactorPixelHp>(it->second, it->second, lmk_hp, shared_from_this());
-//        	}
-//
-//        }
-//        else
-//        {
-//        	LandmarkBasePtr lmk = lmk_track_map_[trkid];
-//        	LandmarkHpPtr lmk_hp = std::static_pointer_cast<LandmarkHp>(lmk);
-//
-//        	//Create factor
-//        	FactorBase::emplace<FactorPixelHp>(feature_last, feature_last, lmk_hp, shared_from_this());
-//        }
-//
-//    }
-
-
-
-	for (auto & pair_id_ftr : track_matrix_.snapshot(last_ptr_))
-	{
-		auto & trkid = pair_id_ftr.first;
-		auto & ftr = pair_id_ftr.second;
-
-		if (lmk_track_map_.count(trkid)==0) //if the track doesn't have landmark associated -> we need a map: map[track_id] = landmarkptr
-		{
-			//emplaceLandmark
-			LandmarkBasePtr lmk = emplaceLandmark(ftr);
-			LandmarkHpPtr lmk_hp = std::static_pointer_cast<LandmarkHp>(lmk);
-
-			//add only one Landmark to map: map[track_id] = landmarkptr
-			lmk_track_map_[trkid] = lmk;
-
-			//emplace a factor
-			FactorBase::emplace<FactorPixelHp>(ftr, ftr, lmk_hp, shared_from_this(), params_->apply_loss_function);
-
-		}
-		else
-		{
-			// recover the landmark
-			LandmarkBasePtr lmk = lmk_track_map_[trkid];
-			LandmarkHpPtr lmk_hp = std::static_pointer_cast<LandmarkHp>(lmk);
-
-			//emplace a factor
-			FactorBase::emplace<FactorPixelHp>(ftr, ftr, lmk_hp, shared_from_this(), params_->apply_loss_function);
-		}
-
-	}
-}
-
-void ProcessorBundleAdjustment::setParams(const ParamsProcessorBundleAdjustmentPtr _params)
-{
-    params_bundle_adjustment_ = _params;
-}
-
-ProcessorBasePtr ProcessorBundleAdjustment::create(const std::string& _unique_name,
-                                                         const ParamsProcessorBasePtr _params)
-{
-  const auto params = std::static_pointer_cast<ParamsProcessorBundleAdjustment>(_params);
-
-  ProcessorBasePtr prc_ptr = std::make_shared<ProcessorBundleAdjustment>(params);
-  prc_ptr->setName(_unique_name);
-  return prc_ptr;
-}
-
-} //namespace wolf
-
-// Register in the FactoryProcessor
-#include "core/processor/factory_processor.h"
-namespace wolf {
-WOLF_REGISTER_PROCESSOR(ProcessorBundleAdjustment)
-} // namespace wolf
-
diff --git a/src/processor/processor_tracker_feature_image.cpp b/src/processor/processor_tracker_feature_image.cpp
deleted file mode 100644
index 474684ced0a5b85e40e477c78e9002feaf83f25f..0000000000000000000000000000000000000000
--- a/src/processor/processor_tracker_feature_image.cpp
+++ /dev/null
@@ -1,371 +0,0 @@
-//--------LICENSE_START--------
-//
-// Copyright (C) 2020,2021,2022 Institut de Robòtica i Informàtica Industrial, CSIC-UPC.
-// Authors: Joan Solà Ortega (jsola@iri.upc.edu)
-// All rights reserved.
-//
-// This file is part of WOLF
-// WOLF is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with this program.  If not, see <http://www.gnu.org/licenses/>.
-//
-//--------LICENSE_END--------
-// Wolf includes
-#include "vision/processor/processor_tracker_feature_image.h"
-
-// Vision utils
-#include <vision_utils/detectors.h>
-#include <vision_utils/descriptors.h>
-#include <vision_utils/matchers.h>
-#include <vision_utils/algorithms.h>
-
-// standard libs
-#include <bitset>
-#include <algorithm>
-
-namespace wolf
-{
-
-ProcessorTrackerFeatureImage::ProcessorTrackerFeatureImage(ParamsProcessorTrackerFeatureImagePtr _params_tracker_feature_image) :
-    ProcessorTrackerFeature("ProcessorTrackerFeatureImage", "PO", 3, _params_tracker_feature_image),
-    cell_width_(0), cell_height_(0),  // These will be initialized to proper values taken from the sensor via function configure()
-    params_tracker_feature_image_(_params_tracker_feature_image)
-{
-	// Detector
-    std::string det_name = vision_utils::readYamlType(params_tracker_feature_image_->yaml_file_params_vision_utils, "detector");
-    det_ptr_ = vision_utils::setupDetector(det_name, det_name + " detector", params_tracker_feature_image_->yaml_file_params_vision_utils);
-
-    // Descriptor
-    std::string des_name = vision_utils::readYamlType(params_tracker_feature_image_->yaml_file_params_vision_utils, "descriptor");
-    des_ptr_ = vision_utils::setupDescriptor(des_name, des_name + " descriptor", params_tracker_feature_image_->yaml_file_params_vision_utils);
-
-    // Matcher
-    std::string mat_name = vision_utils::readYamlType(params_tracker_feature_image_->yaml_file_params_vision_utils, "matcher");
-    mat_ptr_ = vision_utils::setupMatcher(mat_name, mat_name + " matcher", params_tracker_feature_image_->yaml_file_params_vision_utils);
-
-    // Active search grid
-    vision_utils::AlgorithmBasePtr alg_ptr = vision_utils::setupAlgorithm("ACTIVESEARCH", "ACTIVESEARCH algorithm", params_tracker_feature_image_->yaml_file_params_vision_utils);
-    active_search_ptr_ = std::static_pointer_cast<vision_utils::AlgorithmACTIVESEARCH>(alg_ptr);
-}
-
-//Destructor
-ProcessorTrackerFeatureImage::~ProcessorTrackerFeatureImage()
-{
-    //
-}
-
-void ProcessorTrackerFeatureImage::configure(SensorBasePtr _sensor)
-{
-    SensorCameraPtr camera = std::static_pointer_cast<SensorCamera>(_sensor);
-
-    image_.width_ = camera->getImgWidth();
-    image_.height_ = camera->getImgHeight();
-
-    active_search_ptr_->initAlg(camera->getImgWidth(), camera->getImgHeight() , det_ptr_->getPatternRadius());
-
-    params_tracker_feature_image_activesearch_ptr_ = std::static_pointer_cast<vision_utils::AlgorithmParamsACTIVESEARCH>( active_search_ptr_->getParams() );
-
-    cell_width_ = image_.width_ / params_tracker_feature_image_activesearch_ptr_->n_cells_h;
-    cell_height_ = image_.height_ / params_tracker_feature_image_activesearch_ptr_->n_cells_v;
-}
-
-void ProcessorTrackerFeatureImage::preProcess()
-{
-    auto incoming_ptr = std::dynamic_pointer_cast<CaptureImage>(incoming_ptr_);
-    assert(incoming_ptr != nullptr && ("Capture type mismatch. Processor " + getName() + " can only process captures of type CaptureImage").c_str());
-    image_incoming_ = incoming_ptr->getImage();
-
-    active_search_ptr_->renew();
-
-    //The visualization functions and variables
-    if(last_ptr_ != nullptr)
-        resetVisualizationFlag(last_ptr_->getFeatureList());
-
-    tracker_roi_.clear();
-    detector_roi_.clear();
-    tracker_target_.clear();
-}
-
-void ProcessorTrackerFeatureImage::postProcess()
-{
-}
-
-unsigned int ProcessorTrackerFeatureImage::trackFeatures(const FeatureBasePtrList& _features_in,
-                                                         const CaptureBasePtr& _capture,
-                                                         FeatureBasePtrList& _features_out,
-                                                         FeatureMatchMap& _feature_matches)
-{
-    KeyPointVector candidate_keypoints;
-    cv::Mat candidate_descriptors;
-    DMatchVector cv_matches;
-
-    for (auto feature_base_ptr : _features_in)
-    {
-        FeaturePointImagePtr feature_ptr = std::static_pointer_cast<FeaturePointImage>(feature_base_ptr);
-
-        cv::Rect roi = vision_utils::setRoi(feature_ptr->getKeypoint().pt.x, feature_ptr->getKeypoint().pt.y, cell_width_, cell_height_);
-
-        active_search_ptr_->hitCell(feature_ptr->getKeypoint());
-
-        cv::Mat target_descriptor = feature_ptr->getDescriptor();
-
-        //lists used to debug
-        tracker_target_.push_back(feature_ptr->getKeypoint().pt);
-        tracker_roi_.push_back(roi);
-
-        if (detect(image_incoming_, roi, candidate_keypoints, candidate_descriptors))
-        {
-            double normalized_score = match(target_descriptor,candidate_descriptors,cv_matches);
-            if ( normalized_score > mat_ptr_->getParams()->min_norm_score )
-            {
-                auto incoming_point_ptr = FeatureBase::emplace<FeaturePointImage>(_capture,
-                                                                                  candidate_keypoints[cv_matches[0].trainIdx],
-                                                                                  cv_matches[0].trainIdx,
-                                                                                  (candidate_descriptors.row(cv_matches[0].trainIdx)),
-                                                                                  Eigen::Matrix2d::Identity()*params_tracker_feature_image_->pixel_noise_var);
-
-                std::static_pointer_cast<FeaturePointImage>(incoming_point_ptr)->setIsKnown(feature_ptr->isKnown());
-                _features_out.push_back(incoming_point_ptr);
-
-                _feature_matches[incoming_point_ptr] = std::make_shared<FeatureMatch>(FeatureMatch({feature_base_ptr, normalized_score}));
-            }
-            else
-            {
-                //lists used to debug
-                tracker_target_.pop_back();
-                tracker_roi_.pop_back();
-            }
-        }
-        else
-        {
-           //lists used to debug
-            tracker_target_.pop_back();
-            tracker_roi_.pop_back();
-        }
-    }
-//    std::cout << "TrackFeatures - Number of Features tracked: " << _features_out.size() << std::endl;
-    return _features_out.size();
-}
-
-bool ProcessorTrackerFeatureImage::correctFeatureDrift(const FeatureBasePtr _origin_feature, const FeatureBasePtr _last_feature, FeatureBasePtr _incoming_feature)
-{
-    DMatchVector matches_mat;
-    FeaturePointImagePtr feat_incoming_ptr = std::static_pointer_cast<FeaturePointImage>(_incoming_feature);
-    FeaturePointImagePtr feat_origin_ptr = std::static_pointer_cast<FeaturePointImage>(_origin_feature);
-
-    cv::Mat origin_descriptor = feat_origin_ptr->getDescriptor();
-    cv::Mat incoming_descriptor = feat_incoming_ptr->getDescriptor();
-
-    KeyPointVector origin_keypoint;
-    origin_keypoint.push_back(feat_origin_ptr->getKeypoint());
-
-    double normalized_score = match(origin_descriptor,incoming_descriptor,matches_mat);
-
-    if(normalized_score > mat_ptr_->getParams()->min_norm_score)
-        return true;
-    else
-    {
-        /* CORRECT */
-
-        unsigned int roi_width = cell_width_;
-        unsigned int roi_heigth = cell_height_;
-        unsigned int roi_x;
-        unsigned int roi_y;
-
-        KeyPointVector correction_keypoints;
-        cv::Mat correction_descriptors;
-        DMatchVector correction_matches;
-
-        FeaturePointImagePtr feat_last_ptr = std::static_pointer_cast<FeaturePointImage>(_last_feature);
-
-        active_search_ptr_->hitCell(feat_last_ptr->getKeypoint());
-
-        roi_x = (feat_last_ptr->getKeypoint().pt.x) - (roi_heigth / 2);
-        roi_y = (feat_last_ptr->getKeypoint().pt.y) - (roi_width / 2);
-        cv::Rect roi(roi_x, roi_y, roi_width, roi_heigth);
-
-        if (detect(image_incoming_, roi, correction_keypoints, correction_descriptors))
-        {
-            double normalized_score_correction = match(origin_descriptor,correction_descriptors,correction_matches);
-            if(normalized_score_correction > mat_ptr_->getParams()->min_norm_score )
-            {
-                feat_incoming_ptr->setKeypoint(correction_keypoints[correction_matches[0].trainIdx]);
-                feat_incoming_ptr->setDescriptor(correction_descriptors.row(correction_matches[0].trainIdx));
-                return true;
-            }
-            else
-            {
-                return false;
-            }
-        }
-        return false;
-    }
-}
-
-bool ProcessorTrackerFeatureImage::voteForKeyFrame() const
-{
-    return (incoming_ptr_->getFeatureList().size() < params_tracker_feature_image_->min_features_for_keyframe);
-}
-
-unsigned int ProcessorTrackerFeatureImage::detectNewFeatures(const int& _max_new_features,
-                                                             const CaptureBasePtr& _capture,
-                                                             FeatureBasePtrList& _features_out)
-{
-    cv::Rect roi;
-    KeyPointVector new_keypoints;
-    cv::Mat new_descriptors;
-    cv::KeyPointsFilter keypoint_filter;
-    unsigned int n_new_features = 0;
-
-    for (unsigned int n_iterations = 0; _max_new_features == -1 || n_iterations < _max_new_features; n_iterations++)
-    {
-
-        if (active_search_ptr_->pickEmptyRoi(roi))
-        {
-            detector_roi_.push_back(roi);
-            if (detect(image_last_, roi, new_keypoints, new_descriptors))
-            {
-                KeyPointVector list_keypoints = new_keypoints;
-                unsigned int index = 0;
-                keypoint_filter.retainBest(new_keypoints,1);
-                for(unsigned int i = 0; i < list_keypoints.size(); i++)
-                {
-                    if(list_keypoints[i].pt == new_keypoints[0].pt)
-                        index = i;
-                }
-                if(new_keypoints[0].response > params_tracker_feature_image_activesearch_ptr_->min_response_new_feature)
-                {
-                    std::cout << "response: " << new_keypoints[0].response << std::endl;
-                    auto point_ptr = FeatureBase::emplace<FeaturePointImage>(_capture,
-                                                                             new_keypoints[0],
-                                                                             0,
-                                                                             new_descriptors.row(index),
-                                                                             Eigen::Matrix2d::Identity()*params_tracker_feature_image_->pixel_noise_var);
-
-                    std::static_pointer_cast<FeaturePointImage>(point_ptr)->setIsKnown(false);
-
-                    _features_out.push_back(point_ptr);
-
-                    active_search_ptr_->hitCell(new_keypoints[0]);
-
-                    n_new_features++;
-                }
-            }
-            else
-            {
-                active_search_ptr_->blockCell(roi);
-            }
-        }
-        else
-            break;
-    }
-
-    WOLF_DEBUG( "DetectNewFeatures - Number of new features detected: " , n_new_features );
-    return n_new_features;
-}
-
-//============================================================
-
-double ProcessorTrackerFeatureImage::match(cv::Mat _target_descriptor, cv::Mat _candidate_descriptors, DMatchVector& _cv_matches)
-{
-    mat_ptr_->match(_target_descriptor, _candidate_descriptors, des_ptr_->getSize(), _cv_matches);
-    double normalized_score = 1 - (double)(_cv_matches[0].distance)/(des_ptr_->getSize()*8);
-    return normalized_score;
-}
-
-FactorBasePtr ProcessorTrackerFeatureImage::emplaceFactor(FeatureBasePtr _feature_ptr,
-                                                          FeatureBasePtr _feature_other_ptr)
-{
-    return FactorBase::emplace<FactorEpipolar>(_feature_ptr, _feature_ptr, _feature_other_ptr, shared_from_this(), params_->apply_loss_function);
-}
-
-unsigned int ProcessorTrackerFeatureImage::detect(cv::Mat _image, cv::Rect& _roi, std::vector<cv::KeyPoint>& _new_keypoints,
-                                    cv::Mat& _new_descriptors)
-{
-    _new_keypoints = det_ptr_->detect(_image, _roi);
-    _new_descriptors = des_ptr_->getDescriptor(_image, _new_keypoints);
-    return _new_keypoints.size();
-}
-
-void ProcessorTrackerFeatureImage::resetVisualizationFlag(const FeatureBasePtrList& _feature_list_last)
-{
-    for (auto feature_base_last_ptr : _feature_list_last)
-    {
-        FeaturePointImagePtr feature_last_ptr = std::static_pointer_cast<FeaturePointImage>(feature_base_last_ptr);
-        feature_last_ptr->setIsKnown(true);
-    }
-}
-
-// draw functions ===================================================================
-
-void ProcessorTrackerFeatureImage::drawTarget(cv::Mat _image, std::list<cv::Point> _target_list)
-{
-    if (last_ptr_!=nullptr)
-    {
-        // draw the target of the tracking
-        for(auto target_point : _target_list)
-            cv::circle(_image, target_point, 7, cv::Scalar(255.0, 0.0, 255.0), 1, 3, 0);
-    }
-}
-
-void ProcessorTrackerFeatureImage::drawRoi(cv::Mat _image, std::list<cv::Rect> _roi_list, cv::Scalar _color)
-{
-    if (last_ptr_!=nullptr)
-    {
-        for (auto roi : _roi_list)
-            cv::rectangle(_image, roi, _color, 1, 8, 0);
-    }
-}
-
-void ProcessorTrackerFeatureImage::drawFeatures(cv::Mat _image)
-{
-    if (last_ptr_!=nullptr)
-    {
-        unsigned int known_feature_counter = 0;
-        unsigned int new_feature_counter = 0;
-
-        for (auto feature_ptr : last_ptr_->getFeatureList())
-        {
-            FeaturePointImagePtr point_ptr = std::static_pointer_cast<FeaturePointImage>(feature_ptr);
-            if (point_ptr->isKnown())
-            {
-                cv::circle(_image, point_ptr->getKeypoint().pt, 4, cv::Scalar(51.0, 255.0, 51.0), -1, 3, 0);
-                known_feature_counter++;
-            }
-            else
-            {
-                cv::circle(_image, point_ptr->getKeypoint().pt, 4, cv::Scalar(0.0, 0.0, 255.0), -1, 3, 0);
-                new_feature_counter++;
-            }
-
-            cv::putText(_image, std::to_string(feature_ptr->trackId()), point_ptr->getKeypoint().pt,
-                        cv:: FONT_HERSHEY_SIMPLEX, 0.4, cv::Scalar(255.0, 255.0, 0.0));
-        }
-        std::cout << "\nKnown: " << known_feature_counter << "\tNew: " << new_feature_counter << std::endl;
-    }
-}
-
-ProcessorBasePtr ProcessorTrackerFeatureImage::create(const std::string& _unique_name, const ParamsProcessorBasePtr _params)
-{
-    ProcessorTrackerFeatureImagePtr prc_ptr = std::make_shared<ProcessorTrackerFeatureImage>(std::static_pointer_cast<ParamsProcessorTrackerFeatureImage>(_params));
-    prc_ptr->setName(_unique_name);
-    return prc_ptr;
-}
-
-} // namespace wolf
-
-// Register in the FactorySensor
-#include "core/processor/factory_processor.h"
-namespace wolf {
-WOLF_REGISTER_PROCESSOR(ProcessorTrackerFeatureImage)
-} // namespace wolf
-
diff --git a/src/processor/processor_tracker_feature_trifocal.cpp b/src/processor/processor_tracker_feature_trifocal.cpp
deleted file mode 100644
index 8977ca6ef02169f468e49e714495b023fb7c7bdb..0000000000000000000000000000000000000000
--- a/src/processor/processor_tracker_feature_trifocal.cpp
+++ /dev/null
@@ -1,501 +0,0 @@
-//--------LICENSE_START--------
-//
-// Copyright (C) 2020,2021,2022 Institut de Robòtica i Informàtica Industrial, CSIC-UPC.
-// Authors: Joan Solà Ortega (jsola@iri.upc.edu)
-// All rights reserved.
-//
-// This file is part of WOLF
-// WOLF is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with this program.  If not, see <http://www.gnu.org/licenses/>.
-//
-//--------LICENSE_END--------
-
-// wolf
-#include "vision/processor/processor_tracker_feature_trifocal.h"
-
-#include "vision/sensor/sensor_camera.h"
-#include "vision/feature/feature_point_image.h"
-#include "vision/factor/factor_trifocal.h"
-#include "vision/capture/capture_image.h"
-
-// vision_utils
-#include <vision_utils/vision_utils.h>
-#include <vision_utils/detectors.h>
-#include <vision_utils/descriptors.h>
-#include <vision_utils/matchers.h>
-#include <vision_utils/algorithms.h>
-
-#include <memory>
-
-namespace wolf {
-
-//// DEBUG =====================================
-//debug_tTmp = clock();
-//WOLF_TRACE("======== DetectNewFeatures =========");
-//// ===========================================
-//
-//// DEBUG =====================================
-//debug_comp_time_ = (double)(clock() - debug_tTmp) / CLOCKS_PER_SEC;
-//WOLF_TRACE("--> TIME: Detect New Features: detect ",debug_comp_time_);
-//// ===========================================
-
-// Constructor
-ProcessorTrackerFeatureTrifocal::ProcessorTrackerFeatureTrifocal(ParamsProcessorTrackerFeatureTrifocalPtr _params_tracker_feature_trifocal) :
-        ProcessorTrackerFeature("ProcessorTrackerFeatureTrifocal", "PO", 3, _params_tracker_feature_trifocal ),
-        params_tracker_feature_trifocal_(_params_tracker_feature_trifocal),
-        capture_image_last_(nullptr),
-        capture_image_incoming_(nullptr),
-        prev_origin_ptr_(nullptr),
-        initialized_(false)
-{
-    assert(!(params_tracker_feature_trifocal_->yaml_file_params_vision_utils.empty()) && "Missing YAML file with vision_utils parameters!");
-    assert(file_exists(params_tracker_feature_trifocal_->yaml_file_params_vision_utils) && "Cannot setup processor: vision_utils' YAML file does not exist.");
-
-    pixel_cov_ = Eigen::Matrix2d::Identity() * params_tracker_feature_trifocal_->pixel_noise_std * params_tracker_feature_trifocal_->pixel_noise_std;
-
-    // Detector
-    std::string det_name = vision_utils::readYamlType(params_tracker_feature_trifocal_->yaml_file_params_vision_utils, "detector");
-    det_ptr_ = vision_utils::setupDetector(det_name, det_name + " detector", params_tracker_feature_trifocal_->yaml_file_params_vision_utils);
-
-    // Descriptor
-    std::string des_name = vision_utils::readYamlType(params_tracker_feature_trifocal_->yaml_file_params_vision_utils, "descriptor");
-    des_ptr_ = vision_utils::setupDescriptor(des_name, des_name + " descriptor", params_tracker_feature_trifocal_->yaml_file_params_vision_utils);
-
-    // Matcher
-    std::string mat_name = vision_utils::readYamlType(params_tracker_feature_trifocal_->yaml_file_params_vision_utils, "matcher");
-    mat_ptr_ = vision_utils::setupMatcher(mat_name, mat_name + " matcher", params_tracker_feature_trifocal_->yaml_file_params_vision_utils);
-
-    // DEBUG VIEW
-    if (params_tracker_feature_trifocal_->debug_view)
-    {
-        cv::startWindowThread();
-        cv::namedWindow("DEBUG VIEW", cv::WINDOW_NORMAL);
-        //cv::namedWindow("DEBUG MATCHES", cv::WINDOW_NORMAL);
-    }
-}
-
-// Destructor
-ProcessorTrackerFeatureTrifocal::~ProcessorTrackerFeatureTrifocal()
-{
-    if (params_tracker_feature_trifocal_->debug_view)
-        cv::destroyAllWindows();
-}
-
-bool ProcessorTrackerFeatureTrifocal::isInlier(const cv::KeyPoint& _kp_last, const cv::KeyPoint& _kp_incoming)
-{
-    // List of conditions
-    bool inlier = true;
-
-    // A. Check euclidean norm
-    inlier = inlier && (cv::norm(_kp_incoming.pt-_kp_last.pt) < mat_ptr_->getParams()->max_match_euclidean_dist);
-
-    // B. add your new condition test here
-    // inlier = inlier && ...
-
-    return inlier;
-}
-
-
-unsigned int ProcessorTrackerFeatureTrifocal::detectNewFeatures(const int& _max_new_features,
-                                                                const CaptureBasePtr& _capture,
-                                                                FeatureBasePtrList& _features_out)
-{
-//    // DEBUG =====================================
-//    clock_t debug_tStart;
-//    double debug_comp_time_;
-//    debug_tStart = clock();
-//    WOLF_TRACE("======== DetectNewFeatures =========");
-//    // ===========================================
-
-    for (unsigned int n_iterations = 0; _max_new_features == -1 || n_iterations < _max_new_features; ++n_iterations)
-    {
-        Eigen::Vector2i cell_last;
-        if (capture_image_last_->grid_features_->pickEmptyTrackingCell(cell_last))
-        {
-            // Get best keypoint in cell
-            vision_utils::FeatureIdxMap cell_feat_map = capture_image_last_->grid_features_->getFeatureIdxMap(cell_last(0), cell_last(1));
-            bool found_feature_in_cell = false;
-
-            for (auto target_last_pair_response_idx : cell_feat_map)
-            {
-                // Get KeyPoint in last
-                unsigned int index_last = target_last_pair_response_idx.second;
-                cv::KeyPoint kp_last = capture_image_last_->keypoints_.at(index_last);
-                assert(target_last_pair_response_idx.first == kp_last.response && "[ProcessorTrackerFeatureTrifocal::detectNewFeatures]: response mismatch.");
-
-                // Check if there is match with incoming, if not we do not want it
-                if (capture_image_last_->map_index_to_next_.count(index_last))
-                {
-                    // matching keypoint in incoming
-                    unsigned int index_incoming = capture_image_last_->map_index_to_next_[index_last];
-                    cv::KeyPoint kp_incoming = capture_image_incoming_->keypoints_.at(index_incoming);
-
-                    // validate match with extra tests
-                    if (isInlier( kp_incoming, kp_last))
-                    {
-                        // Emplace WOLF feature
-                        auto ftr_point_last = FeatureBase::emplace<FeaturePointImage>(_capture,
-                                                                                      kp_last,
-                                                                                      index_last,
-                                                                                      capture_image_last_->descriptors_.row(index_last),
-                                                                                      pixel_cov_);
-
-                        _features_out.push_back(ftr_point_last);
-
-                        // hit cell to acknowledge there's a tracked point in that cell
-                        capture_image_last_->grid_features_->hitTrackingCell(kp_last);
-
-                        found_feature_in_cell = true;
-
-                        break; // Good kp found for this grid.
-                    }
-                }
-            }
-            if (!found_feature_in_cell)
-                capture_image_last_->grid_features_->blockTrackingCell(cell_last);
-        }
-        else
-            break; // There are no empty cells
-    }
-
-//    // DEBUG
-//    WOLF_TRACE("DetectNewFeatures - Number of new features detected: " , _feature_list_out.size() );
-//    debug_comp_time_ = (double)(clock() - debug_tStart) / CLOCKS_PER_SEC;
-//    WOLF_TRACE("--> TIME: detect new features: TOTAL ",debug_comp_time_);
-//    WOLF_TRACE("======== END DETECT NEW FEATURES =========");
-
-    return _features_out.size();
-}
-
-unsigned int ProcessorTrackerFeatureTrifocal::trackFeatures(const FeatureBasePtrList& _features_in,
-                                                            const CaptureBasePtr& _capture,
-                                                            FeatureBasePtrList& _features_out,
-                                                            FeatureMatchMap& _feature_matches)
-{
-//    // DEBUG =====================================
-//    clock_t debug_tStart;
-//    double debug_comp_time_;
-//    debug_tStart = clock();
-//    WOLF_TRACE("======== TrackFeatures =========");
-//    // ===========================================
-
-    for (auto feature_base_last_ : _features_in)
-    {
-        FeaturePointImagePtr feature_last_ = std::static_pointer_cast<FeaturePointImage>(feature_base_last_);
-
-        if ( capture_image_last_->map_index_to_next_.count(feature_last_->getIndexKeyPoint()) )
-        {
-            int index_incoming = capture_image_last_->map_index_to_next_[feature_last_->getIndexKeyPoint()];
-
-            if (capture_image_incoming_->matches_normalized_scores_.at(index_incoming) > mat_ptr_->getParams()->min_norm_score )
-            {
-                // Check Euclidean distance between keypoints
-                cv::KeyPoint kp_incoming = capture_image_incoming_->keypoints_.at(index_incoming);
-                cv::KeyPoint kp_last = capture_image_last_->keypoints_.at(feature_last_->getIndexKeyPoint());
-
-                if (isInlier(kp_last, kp_incoming))
-                {
-                    auto ftr_point_incoming = FeatureBase::emplace<FeaturePointImage>(_capture,
-                                                                                      kp_incoming,
-                                                                                      index_incoming,
-                                                                                      capture_image_incoming_->descriptors_.row(index_incoming),
-                                                                                      pixel_cov_);
-
-                    _features_out.push_back(ftr_point_incoming);
-
-                    _feature_matches[ftr_point_incoming] = std::make_shared<FeatureMatch>(FeatureMatch({feature_last_, capture_image_incoming_->matches_normalized_scores_.at(index_incoming)}));
-
-                    // hit cell to acknowledge there's a tracked point in that cell
-                    capture_image_incoming_->grid_features_->hitTrackingCell(kp_incoming);
-                }
-            }
-        }
-    }
-
-//    // DEBUG
-//    WOLF_TRACE("TrAckFeatures - Number of features tracked: " , _feature_list_out.size() );
-//    debug_comp_time_ = (double)(clock() - debug_tStart) / CLOCKS_PER_SEC;
-//    WOLF_TRACE("--> TIME: track: ",debug_comp_time_);
-//    WOLF_TRACE("======== END TRACK FEATURES =========");
-
-    return _features_out.size();
-}
-
-bool ProcessorTrackerFeatureTrifocal::correctFeatureDrift(const FeatureBasePtr _origin_feature, const FeatureBasePtr _last_feature, FeatureBasePtr _incoming_feature)
-{
-    // We purposely ignore this step
-    return true;
-}
-
-bool ProcessorTrackerFeatureTrifocal::voteForKeyFrame() const
-{
-//    // A. crossing voting threshold with ascending number of features
-    bool vote_up = false;
-//    // 1. vote if we did not have enough features before
-//    vote_up = vote_up && (previousNumberOfTracks() < params_tracker_feature_trifocal_->min_features_for_keyframe);
-//    // 2. vote if we have enough features now
-//    vote_up = vote_up && (incoming_ptr_->getFeatureList().size() >= params_tracker_feature_trifocal_->min_features_for_keyframe);
-
-    // B. crossing voting threshold with descending number of features
-    bool vote_down = true;
-    // 1. vote if we had enough features before
-//    vote_down = vote_down && (last_ptr_->getFeatureList().size() >= params_tracker_feature_trifocal_->min_features_for_keyframe);
-    // 2. vote if we have not enough features now
-    vote_down = vote_down && (incoming_ptr_->getFeatureList().size() < params_tracker_feature_trifocal_->min_features_for_keyframe);
-
-//    // C. Time-based policies
-    bool vote_time = false;
-////    vote_time = vote_time || (incoming_ptr_->getTimeStamp()-origin_ptr_->getTimeStamp() > 1.0);
-//
-//    if (vote_up)
-//        WOLF_TRACE("VOTE UP");
-//    if (vote_down)
-//        WOLF_TRACE("VOTE DOWN");
-//    if (vote_time)
-//        WOLF_TRACE("VOTE TIME");
-
-    return vote_up || vote_down || vote_time;
-}
-
-void ProcessorTrackerFeatureTrifocal::advanceDerived()
-{
-    ProcessorTrackerFeature::advanceDerived();
-}
-
-void ProcessorTrackerFeatureTrifocal::resetDerived()
-{
-    // Call parent class' reset method
-    ProcessorTrackerFeature::resetDerived();
-
-    // Conditionally assign the prev_origin pointer
-    if (initialized_)
-        prev_origin_ptr_ = origin_ptr_;
-
-//    // Print resulting list
-//    TrackMatches matches_prevorig_origin = track_matrix_.matches(prev_origin_ptr_, origin_ptr_);
-//    for (auto const & pair_trkid_pair : matches_prevorig_origin)
-//    {
-//        FeatureBasePtr feature_in_prev   = pair_trkid_pair.second.first;
-//        FeatureBasePtr feature_in_origin = pair_trkid_pair.second.second;
-//
-//        WOLF_DEBUG("Matches reset prev <-- orig: track: ", pair_trkid_pair.first,
-//                   " prev: ", feature_in_prev->id(),
-//                   " origin: ", feature_in_origin->id());
-//    }
-}
-
-void ProcessorTrackerFeatureTrifocal::preProcess()
-{
-//    //DEBUG
-//    WOLF_TRACE("-------- Image ", getIncoming()->id(), " -- t = ", getIncoming()->getTimeStamp(), " s ----------");
-
-    if (!initialized_)
-    {
-        if (origin_ptr_ && last_ptr_ && (last_ptr_ != origin_ptr_) && prev_origin_ptr_ == nullptr)
-            prev_origin_ptr_ = origin_ptr_;
-
-        if (prev_origin_ptr_ && origin_ptr_ && last_ptr_ && prev_origin_ptr_ != origin_ptr_)
-            initialized_ = true;
-    }
-
-    // Get capture
-    capture_image_incoming_ = std::static_pointer_cast<CaptureImage>(incoming_ptr_);
-
-    // Detect INC KeyPoints
-    capture_image_incoming_->keypoints_ = det_ptr_->detect(capture_image_incoming_->getImage());
-
-    // Get INC descriptors
-    capture_image_incoming_->descriptors_ = des_ptr_->getDescriptor(capture_image_incoming_->getImage(), capture_image_incoming_->keypoints_);
-
-    // Create and fill incoming grid
-    capture_image_incoming_->grid_features_ = std::make_shared<vision_utils::FeatureIdxGrid>(capture_image_incoming_->getImage().rows, capture_image_incoming_->getImage().cols, params_tracker_feature_trifocal_->n_cells_v, params_tracker_feature_trifocal_->n_cells_h);
-
-    capture_image_incoming_->grid_features_->insert(capture_image_incoming_->keypoints_);
-
-    // If last_ptr_ is not null, then we can do some computation here.
-    if (last_ptr_ != nullptr)
-    {
-        // Get capture
-        capture_image_last_ = std::static_pointer_cast<CaptureImage>(last_ptr_);
-
-        // Match full image (faster)
-        // We exchange the order of the descriptors to fill better the map hereafter (map does not allow a single key)
-        capture_image_incoming_->matches_normalized_scores_ = mat_ptr_->robustMatch(capture_image_incoming_->keypoints_,
-                                                                              capture_image_last_->keypoints_,
-                                                                              capture_image_incoming_->descriptors_,
-                                                                              capture_image_last_->descriptors_,
-                                                                              des_ptr_->getSize(),
-                                                                              capture_image_incoming_->matches_from_precedent_);
-
-        // Set capture map of match indices
-        for (auto match : capture_image_incoming_->matches_from_precedent_)
-            capture_image_last_->map_index_to_next_[match.trainIdx] = match.queryIdx; // map[last] = incoming
-
-        // DEBUG VIEW
-//        if (params_tracker_feature_trifocal_->debug_view)
-//        {
-//            cv::Mat img_last = (std::static_pointer_cast<CaptureImage>(last_ptr_))->getImage();
-//            cv::Mat img_incoming = (std::static_pointer_cast<CaptureImage>(incoming_ptr_))->getImage();
-//
-//            cv::putText(img_last, "LAST",    cv::Point(img_last.cols/2,20), cv::FONT_HERSHEY_PLAIN, 1.0, CV_RGB(255,0,0), 10.0);
-//            cv::putText(img_incoming, "INCOMING",cv::Point(img_last.cols/2,20), cv::FONT_HERSHEY_PLAIN, 1.0, CV_RGB(255,0,0), 10.0);
-//
-//            cv::Mat img_matches;
-//            cv::drawMatches(img_incoming, capture_incoming_->keypoints_, img_last, capture_last_->keypoints_, capture_incoming_->matches_from_precedent_, img_matches);
-//            cv::imshow("DEBUG MATCHES", img_matches);
-//            cv::waitKey(0);
-//        }
-    }
-}
-
-void ProcessorTrackerFeatureTrifocal::postProcess()
-{
-    std::map<int,std::list<vision_utils::KeyPointEnhanced> > kp_enh_tracks;
-
-    for (auto const & feat_base : last_ptr_->getFeatureList())
-    {
-        FeaturePointImagePtr feat = std::static_pointer_cast<FeaturePointImage>(feat_base);
-        unsigned int feat_id = feat->id();
-        unsigned int track_id = feat->trackId();
-
-        // tracks
-        std::list<vision_utils::KeyPointEnhanced> kp_enh_track;
-        for (auto feat_base : track_matrix_.trackAsVector(track_id))
-        {
-            FeaturePointImagePtr feat_img = std::static_pointer_cast<FeaturePointImage>(feat_base);
-            vision_utils::KeyPointEnhanced kp_enh(feat_img->getKeypoint(),
-                                                  feat_id,
-                                                  track_id,
-                                                  track_matrix_.trackSize(track_id),
-                                                  feat_img->getMeasurementCovariance());
-            kp_enh_track.push_back(kp_enh);
-        }
-
-        kp_enh_tracks[feat_id] = kp_enh_track;
-    }
-
-    // DEBUG VIEW
-    if (params_tracker_feature_trifocal_->debug_view)
-    {
-        image_debug_ = vision_utils::buildImageProcessed((std::static_pointer_cast<CaptureImage>(last_ptr_))->getImage(), kp_enh_tracks);
-
-        cv::imshow("DEBUG VIEW", image_debug_);
-        cv::waitKey(1);
-    }
-}
-
-FactorBasePtr ProcessorTrackerFeatureTrifocal::emplaceFactor(FeatureBasePtr _feature_ptr, FeatureBasePtr _feature_other_ptr)
-{
-    // NOTE: This function cannot be implemented because the API lacks an input to feature_prev_origin.
-    // Therefore, we implement establishFactors() instead and do all the job there.
-    // This function remains empty, and with a warning or even an error issued in case someone calls it.
-    std::cout << "033[1;33m [WARN]:033[0m ProcessorTrackerFeatureTrifocal::emplaceFactor is empty." << std::endl;
-    FactorBasePtr return_var{}; //TODO: fill this variable
-    return return_var;
-}
-
-void ProcessorTrackerFeatureTrifocal::establishFactors()
-{
-//    WOLF_TRACE("===== ESTABLISH FACTORS =====");
-
-    if (initialized_)
-    {
-        // get tracks between prev, origin and last
-        TrackMatches matches = track_matrix_.matches(prev_origin_ptr_, last_ptr_); // it's guaranteed by construction that the track also includes origin
-
-        for (auto pair_trkid_match : matches) // OMG this will add potentially a loooot of factors! TODO see a smarter way of adding factors
-        {                                     // Currently reduced by creating factors for large tracks
-            // get track ID
-            SizeStd trk_id = pair_trkid_match.first;
-
-            if (track_matrix_.trackSize(trk_id)>params_tracker_feature_trifocal_->min_track_length_for_factor)
-            {
-                // get the last feature in the track
-                FeatureBasePtr ftr_last = pair_trkid_match.second.second;
-
-                // get the first feature in the whole track
-                FeatureBasePtr ftr_first = track_matrix_.firstFeature(trk_id);
-
-                // get the middle feature of the track using the average of the time stamps
-                FeatureBasePtr ftr_mid = nullptr;
-
-                TimeStamp ts_first      = ftr_first->getCapture()->getTimeStamp();
-                TimeStamp ts_last       = ftr_last->getCapture()->getTimeStamp();
-                double    Dt2           = (ts_last - ts_first) / 2.0;
-                TimeStamp ts_ave        = ts_first + Dt2;
-
-                double dt_dev_min = Dt2;
-                auto track = track_matrix_.track(trk_id);
-                for (auto ftr_it = track.begin() ; ftr_it != track.end() ; ftr_it ++)
-                {
-                    if ( ftr_it->second->getCapture() != nullptr ) // have capture
-                    {
-                        if ( auto kf_mid = ftr_it->second->getCapture()->getFrame() ) // have frame
-                        {
-                            TimeStamp ts_mid    = kf_mid->getTimeStamp();
-
-                            auto dt_dev_curr = fabs(ts_mid - ts_ave);
-                            if (dt_dev_curr <= dt_dev_min) // dt_dev is decreasing
-                            {
-                                dt_dev_min  = dt_dev_curr;
-                                ftr_mid = ftr_it->second;
-                            }
-                            else //if (dt_dev is increasing)
-                                break;
-                        }
-                    }
-                }
-
-                // Several safety checks
-                assert(ftr_mid != nullptr   && "Middle feature is nullptr!");
-                assert(ftr_mid->getCapture()->getFrame() != nullptr   && "Middle feature's frame is nullptr!");
-                assert(ftr_mid != ftr_first && "First and middle features are the same!");
-                assert(ftr_mid != ftr_last  && "Last and middle features are the same!");
-
-                // emplace factor
-                auto ctr = FactorBase::emplace<FactorTrifocal>(ftr_last, ftr_first, ftr_mid, ftr_last, shared_from_this(), params_->apply_loss_function, FAC_ACTIVE);
-            }
-        }
-    }
-
-//    WOLF_TRACE("===== END ESTABLISH FACTORS =====");
-
-}
-
-void ProcessorTrackerFeatureTrifocal::setParams(const ParamsProcessorTrackerFeatureTrifocalPtr _params)
-{
-    params_tracker_feature_trifocal_ = _params;
-}
-
-void ProcessorTrackerFeatureTrifocal::configure(SensorBasePtr _sensor)
-{
-    _sensor->setNoiseStd(Vector2d::Ones() * params_tracker_feature_trifocal_->pixel_noise_std);
-}
-
-ProcessorBasePtr ProcessorTrackerFeatureTrifocal::create(const std::string& _unique_name,
-                                                         const ParamsProcessorBasePtr _params)
-{
-  const auto params = std::static_pointer_cast<ParamsProcessorTrackerFeatureTrifocal>(_params);
-
-  ProcessorBasePtr prc_ptr = std::make_shared<ProcessorTrackerFeatureTrifocal>(params);
-  prc_ptr->setName(_unique_name);
-  return prc_ptr;
-}
-
-} // namespace wolf
-
-// Register in the FactoryProcessor
-#include "core/processor/factory_processor.h"
-namespace wolf {
-WOLF_REGISTER_PROCESSOR(ProcessorTrackerFeatureTrifocal)
-} // namespace wolf
diff --git a/src/processor/processor_tracker_landmark_image.cpp b/src/processor/processor_tracker_landmark_image.cpp
deleted file mode 100644
index e5f5226d7a03f69aaf9f0bb7dc0a3d373f8cf242..0000000000000000000000000000000000000000
--- a/src/processor/processor_tracker_landmark_image.cpp
+++ /dev/null
@@ -1,479 +0,0 @@
-//--------LICENSE_START--------
-//
-// Copyright (C) 2020,2021,2022 Institut de Robòtica i Informàtica Industrial, CSIC-UPC.
-// Authors: Joan Solà Ortega (jsola@iri.upc.edu)
-// All rights reserved.
-//
-// This file is part of WOLF
-// WOLF is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with this program.  If not, see <http://www.gnu.org/licenses/>.
-//
-//--------LICENSE_END--------
-#include "vision/processor/processor_tracker_landmark_image.h"
-
-#include "vision/capture/capture_image.h"
-#include "vision/factor/factor_ahp.h"
-#include "vision/feature/feature_point_image.h"
-#include "vision/math/pinhole_tools.h"
-#include "vision/sensor/sensor_camera.h"
-
-#include "core/feature/feature_base.h"
-#include "core/frame/frame_base.h"
-#include "core/utils/logging.h"
-#include "core/map/map_base.h"
-#include "core/problem/problem.h"
-#include "core/state_block/state_block.h"
-#include "core/common/time_stamp.h"
-
-// vision_utils
-#include <vision_utils/detectors.h>
-#include <vision_utils/descriptors.h>
-#include <vision_utils/matchers.h>
-#include <vision_utils/algorithms.h>
-
-#include <Eigen/Geometry>
-#include <iomanip> //setprecision
-
-namespace wolf
-{
-
-ProcessorTrackerLandmarkImage::ProcessorTrackerLandmarkImage(ParamsProcessorTrackerLandmarkImagePtr _params_tracker_landmark_image) :
-    ProcessorTrackerLandmark("ProcessorTrackerLandmarkImage", "PO", _params_tracker_landmark_image),
-    cell_width_(0),
-    cell_height_(0),
-    params_tracker_landmark_image_(_params_tracker_landmark_image),
-    n_feature_(0),
-    landmark_idx_non_visible_(0)
-{
-    // Detector
-    std::string det_name = vision_utils::readYamlType(params_tracker_landmark_image_->yaml_file_params_vision_utils, "detector");
-    det_ptr_ = vision_utils::setupDetector(det_name, det_name + " detector", params_tracker_landmark_image_->yaml_file_params_vision_utils);
-
-    // Descriptor
-    std::string des_name = vision_utils::readYamlType(params_tracker_landmark_image_->yaml_file_params_vision_utils, "descriptor");
-    des_ptr_ = vision_utils::setupDescriptor(des_name, des_name + " descriptor", params_tracker_landmark_image_->yaml_file_params_vision_utils);
-
-    // Matcher
-    std::string mat_name = vision_utils::readYamlType(params_tracker_landmark_image_->yaml_file_params_vision_utils, "matcher");
-    mat_ptr_ = vision_utils::setupMatcher(mat_name, mat_name + " matcher", params_tracker_landmark_image_->yaml_file_params_vision_utils);
-
-    // Active search grid
-    vision_utils::AlgorithmBasePtr alg_ptr = vision_utils::setupAlgorithm("ACTIVESEARCH", "ACTIVESEARCH algorithm", params_tracker_landmark_image_->yaml_file_params_vision_utils);
-    active_search_ptr_ = std::static_pointer_cast<vision_utils::AlgorithmACTIVESEARCH>(alg_ptr);
-}
-
-ProcessorTrackerLandmarkImage::~ProcessorTrackerLandmarkImage()
-{
-    //
-}
-
-void ProcessorTrackerLandmarkImage::configure(SensorBasePtr _sensor)
-{
-    SensorCameraPtr camera(std::static_pointer_cast<SensorCamera>(_sensor));
-    image_.width_ = camera->getImgWidth();
-    image_.height_ = camera->getImgHeight();
-
-    active_search_ptr_->initAlg(camera->getImgWidth(), camera->getImgHeight(), det_ptr_->getPatternRadius() );
-
-    params_tracker_landmark_image_activesearch_ptr_ = std::static_pointer_cast<vision_utils::AlgorithmParamsACTIVESEARCH>( active_search_ptr_->getParams() );
-
-    cell_width_ = image_.width_ / params_tracker_landmark_image_activesearch_ptr_->n_cells_h;
-    cell_height_ = image_.height_ / params_tracker_landmark_image_activesearch_ptr_->n_cells_v;
-}
-
-void ProcessorTrackerLandmarkImage::preProcess()
-{
-    auto incoming_ptr = std::dynamic_pointer_cast<CaptureImage>(incoming_ptr_);
-    assert(incoming_ptr != nullptr && ("Capture type mismatch. Processor " + getName() + " can only process captures of type CaptureImage").c_str());
-    image_incoming_ = incoming_ptr->getImage();
-    active_search_ptr_->renew();
-
-    detector_roi_.clear();
-    feat_lmk_found_.clear();
-}
-
-void ProcessorTrackerLandmarkImage::postProcess()
-{
-    detector_roi_.clear();
-    feat_lmk_found_.clear();
-}
-
-unsigned int ProcessorTrackerLandmarkImage::findLandmarks(const LandmarkBasePtrList& _landmarks_in,
-                                                          const CaptureBasePtr& _capture,
-                                                          FeatureBasePtrList& _features_out,
-                                                          LandmarkMatchMap& _feature_landmark_correspondences)
-{
-    KeyPointVector candidate_keypoints;
-    cv::Mat candidate_descriptors;
-    DMatchVector cv_matches;
-
-    Eigen::VectorXd current_state = getProblem()->getState(incoming_ptr_->getTimeStamp()).vector("PO");
-
-    for (auto landmark_in_ptr : _landmarks_in)
-    {
-
-        // project landmark into incoming capture
-        LandmarkAhpPtr landmark_ptr = std::static_pointer_cast<LandmarkAhp>(landmark_in_ptr);
-        SensorCameraPtr camera = std::static_pointer_cast<SensorCamera>(this->getSensor());
-        Eigen::Vector4d point3d_hmg;
-        Eigen::Vector2d pixel;
-
-        landmarkInCurrentCamera(current_state, landmark_ptr, point3d_hmg);
-
-        pixel = pinhole::projectPoint(camera->getIntrinsic()->getState(),
-                                      camera->getDistortionVector(),
-                                      point3d_hmg.head<3>());
-
-        if(pinhole::isInImage(pixel, image_.width_, image_.height_))
-        {
-            cv::Rect roi = vision_utils::setRoi(pixel[0], pixel[1], cell_width_, cell_height_);
-
-            active_search_ptr_->hitCell(pixel);
-
-            cv::Mat target_descriptor = landmark_ptr->getCvDescriptor();
-
-            if (detect(image_incoming_, roi, candidate_keypoints, candidate_descriptors))
-            {
-                double normalized_score = match(target_descriptor,candidate_descriptors,cv_matches);
-
-                if (normalized_score > mat_ptr_->getParams()->min_norm_score )
-                {
-                    FeaturePointImagePtr incoming_point_ptr = std::static_pointer_cast<FeaturePointImage>(
-                            FeatureBase::emplace<FeaturePointImage>(_capture,
-                                                                    candidate_keypoints[cv_matches[0].trainIdx],
-                                                                    cv_matches[0].trainIdx,
-                                                                    candidate_descriptors.row(cv_matches[0].trainIdx),
-                                                                    Eigen::Matrix2d::Identity()*params_tracker_landmark_image_->pixel_noise_var) );
-
-                    incoming_point_ptr->setTrackId(landmark_in_ptr->id());
-                    incoming_point_ptr->setLandmarkId(landmark_in_ptr->id());
-                    incoming_point_ptr->setScore(normalized_score);
-                    incoming_point_ptr->setExpectation(pixel);
-
-                    _features_out.push_back(incoming_point_ptr);
-
-                    _feature_landmark_correspondences[incoming_point_ptr] = std::make_shared<LandmarkMatch>(landmark_in_ptr, normalized_score);
-
-                    feat_lmk_found_.push_back(incoming_point_ptr);
-
-                    // To visualize
-                    cv::Rect roi2 = roi;
-                    vision_utils::trimRoi(image_.width_, image_.height_, roi2);
-                    incoming_point_ptr->setTrackerRoi(roi2);
-                }
-//                else
-//                    std::cout << "NOT FOUND" << std::endl;
-            }
-//            else
-//                std::cout << "NOT DETECTED/FOUND" << std::endl;
-        }
-//        else
-//            std::cout << "NOT in the image" << std::endl;
-    }
-//    std::cout << "\tNumber of Features tracked: " << _features_incoming_out.size() << std::endl;
-    landmarks_tracked_ = _features_out.size();
-    return _features_out.size();
-}
-
-bool ProcessorTrackerLandmarkImage::voteForKeyFrame() const
-{
-    return false;
-//    return landmarks_tracked_ < params_tracker_landmark_image_->min_features_for_keyframe;
-}
-
-unsigned int ProcessorTrackerLandmarkImage::detectNewFeatures(const int& _max_new_features,
-                                                              const CaptureBasePtr& _capture,
-                                                              FeatureBasePtrList& _features_out)
-{
-    cv::Rect roi;
-    KeyPointVector new_keypoints;
-    cv::Mat new_descriptors;
-    cv::KeyPointsFilter keypoint_filter;
-    unsigned int n_new_features = 0;
-
-    for (unsigned int n_iterations = 0; _max_new_features == -1 || n_iterations < _max_new_features; n_iterations++)
-    {
-        if (active_search_ptr_->pickEmptyRoi(roi))
-        {
-            detector_roi_.push_back(roi);
-            if (detect(image_last_, roi, new_keypoints, new_descriptors))
-            {
-                KeyPointVector list_keypoints = new_keypoints;
-                unsigned int index = 0;
-                keypoint_filter.retainBest(new_keypoints,1);
-                for(unsigned int i = 0; i < list_keypoints.size(); i++)
-                {
-                    if(list_keypoints[i].pt == new_keypoints[0].pt)
-                        index = i;
-                }
-
-                if(new_keypoints[0].response > params_tracker_landmark_image_activesearch_ptr_->min_response_new_feature)
-                {
-                    list_response_.push_back(new_keypoints[0].response);
-                    FeaturePointImagePtr point_ptr = std::static_pointer_cast<FeaturePointImage>(
-                            FeatureBase::emplace<FeaturePointImage>(_capture,
-                                                                    new_keypoints[0],
-                                                                    0,
-                                                                    new_descriptors.row(index),
-                                                                    Eigen::Matrix2d::Identity()*params_tracker_landmark_image_->pixel_noise_var) );
-                    point_ptr->setIsKnown(false);
-                    point_ptr->setTrackId(point_ptr->id());
-                    point_ptr->setExpectation(Eigen::Vector2d(new_keypoints[0].pt.x,new_keypoints[0].pt.y));
-                    _features_out.push_back(point_ptr);
-
-                    active_search_ptr_->hitCell(new_keypoints[0]);
-
-                    n_new_features++;
-                }
-
-            }
-            else
-                active_search_ptr_->blockCell(roi);
-        }
-        else
-            break;
-    }
-
-    WOLF_DEBUG( "Number of new features detected: " , n_new_features );
-    return n_new_features;
-}
-
-LandmarkBasePtr ProcessorTrackerLandmarkImage::emplaceLandmark(FeatureBasePtr _feature_ptr)
-{
-
-    FeaturePointImagePtr feat_point_image_ptr = std::static_pointer_cast<FeaturePointImage>( _feature_ptr);
-    FrameBasePtr anchor_frame = getLast()->getFrame();
-
-    Eigen::Vector2d point2d;
-    point2d[0] = feat_point_image_ptr->getKeypoint().pt.x;
-    point2d[1] = feat_point_image_ptr->getKeypoint().pt.y;
-
-    double distance = params_tracker_landmark_image_->distance; // arbitrary value
-    Eigen::Vector4d vec_homogeneous;
-
-    point2d = pinhole::depixellizePoint(getSensor()->getIntrinsic()->getState(),point2d);
-    point2d = pinhole::undistortPoint((std::static_pointer_cast<SensorCamera>(getSensor()))->getCorrectionVector(),point2d);
-
-    Eigen::Vector3d point3d;
-    point3d.head<2>() = point2d;
-    point3d(2) = 1;
-
-    point3d.normalize();
-
-    vec_homogeneous = {point3d(0),point3d(1),point3d(2),1/distance};
-
-    auto lmk_ahp_ptr = LandmarkBase::emplace<LandmarkAhp>(getProblem()->getMap(),
-                                                          vec_homogeneous,
-                                                          anchor_frame,
-                                                          getSensor(),
-                                                          feat_point_image_ptr->getDescriptor());
-    _feature_ptr->setLandmarkId(lmk_ahp_ptr->id());
-    return lmk_ahp_ptr;
-}
-
-FactorBasePtr ProcessorTrackerLandmarkImage::emplaceFactor(FeatureBasePtr _feature_ptr, LandmarkBasePtr _landmark_ptr)
-{
-
-    if ((std::static_pointer_cast<LandmarkAhp>(_landmark_ptr))->getAnchorFrame() == last_ptr_->getFrame()) //FIXME: shouldn't it be _feature_ptr->getFrame() ?
-    {
-        return FactorBasePtr();
-    }
-    else
-    {
-        assert (last_ptr_ && "bad last ptr");
-        assert (_landmark_ptr && "bad lmk ptr");
-
-        LandmarkAhpPtr landmark_ahp = std::static_pointer_cast<LandmarkAhp>(_landmark_ptr);
-
-        return FactorBase::emplace<FactorAhp>(_feature_ptr, _feature_ptr, landmark_ahp, shared_from_this(), params_->apply_loss_function);
-    }
-}
-
-// ==================================================================== My own functions
-
-void ProcessorTrackerLandmarkImage::landmarkInCurrentCamera(const Eigen::VectorXd& _current_state,
-                                                     const LandmarkAhpPtr   _landmark,
-                                                     Eigen::Vector4d&       _point3d_hmg)
-{
-    using namespace Eigen;
-
-    /*
-     * Rationale: we transform the landmark from anchor camera to current camera:
-     *
-     *      C0 ---> R0 ---> W ---> R1 ---> C1
-     *
-     * where
-     *      '0' is 'anchor'
-     *      '1' is 'current',
-     *      'R' is 'robot'
-     *      'C' is 'camera'
-     *      'W' is 'world',
-     *
-     * by concatenating the individual transforms:
-     *
-     *      T_W_R0,
-     *      T_W_R1,
-     *      T_R0_C0,
-     *      T_R1_C1
-     *
-     * We use Eigen::Transform which is like using homogeneous transform matrices with a simpler API
-     */
-
-    // Assert frame is 3d with at least PQ
-    assert((_current_state.size() == 7 || _current_state.size() == 16) && "Wrong state size! Should be 7 for 3d pose or 16 for Imu.");
-
-    // ALL TRANSFORMS
-    Transform<double,3,Eigen::Isometry> T_W_R0, T_W_R1, T_R0_C0, T_R1_C1;
-
-    // world to anchor robot frame
-    Translation<double,3>  t_w_r0(_landmark->getAnchorFrame()->getP()->getState()); // sadly we cannot put a Map over a translation
-    const Quaterniond q_w_r0(Eigen::Vector4d(_landmark->getAnchorFrame()->getO()->getState()));
-    T_W_R0 = t_w_r0 * q_w_r0;
-
-    // world to current robot frame
-    Translation<double,3>  t_w_r1(_current_state.head<3>());
-    Map<const Quaterniond> q_w_r1(_current_state.data() + 3);
-    T_W_R1 = t_w_r1 * q_w_r1;
-
-    // anchor robot to anchor camera
-    Translation<double,3>  t_r0_c0(_landmark->getAnchorSensor()->getP()->getState());
-    const Quaterniond q_r0_c0(Eigen::Vector4d(_landmark->getAnchorSensor()->getO()->getState()));
-    T_R0_C0 = t_r0_c0 * q_r0_c0;
-
-    // current robot to current camera
-    Translation<double,3>  t_r1_c1(this->getSensor()->getP()->getState());
-    const Quaterniond q_r1_c1(Eigen::Vector4d(this->getSensor()->getO()->getState()));
-    T_R1_C1 = t_r1_c1 * q_r1_c1;
-
-    // Transform lmk from c0 to c1 and exit
-    Vector4d landmark_hmg_c0 = _landmark->getP()->getState(); // lmk in anchor frame
-    _point3d_hmg = T_R1_C1.inverse(Eigen::Isometry) * T_W_R1.inverse(Eigen::Isometry) * T_W_R0 * T_R0_C0 * landmark_hmg_c0;
-}
-
-double ProcessorTrackerLandmarkImage::match(const cv::Mat _target_descriptor, const cv::Mat _candidate_descriptors, DMatchVector& _cv_matches)
-{
-    std::vector<double> normalized_scores = mat_ptr_->match(_target_descriptor, _candidate_descriptors, des_ptr_->getSize(), _cv_matches);
-    return normalized_scores[0];
-}
-
-unsigned int ProcessorTrackerLandmarkImage::detect(const cv::Mat _image, cv::Rect& _roi, KeyPointVector& _new_keypoints, cv::Mat& new_descriptors)
-{
-    _new_keypoints = det_ptr_->detect(_image, _roi);
-    new_descriptors = des_ptr_->getDescriptor(_image, _new_keypoints);
-    return _new_keypoints.size();
-}
-
-void ProcessorTrackerLandmarkImage::drawTrackerRoi(cv::Mat _image, cv::Scalar _color)
-{
-    CaptureImagePtr _capture = std::static_pointer_cast<CaptureImage>(last_ptr_);
-    if (last_ptr_!=nullptr)
-    {
-        for (auto feature : _capture->getFeatureList())
-            cv::rectangle(_image, std::static_pointer_cast<FeaturePointImage>(feature)->getTrackerRoi(), _color, 1, 8, 0);
-    }
-}
-
-void ProcessorTrackerLandmarkImage::drawRoi(cv::Mat _image, std::list<cv::Rect> _roi_list, cv::Scalar _color)
-{
-    if (last_ptr_!=nullptr)
-    {
-        for (auto roi : _roi_list)
-            cv::rectangle(_image, roi, _color, 1, 8, 0);
-    }
-}
-
-void ProcessorTrackerLandmarkImage::drawFeaturesFromLandmarks(cv::Mat _image)
-{
-    if (last_ptr_!=nullptr)
-    {
-        FeaturePointImagePtr ftr;
-        for(auto feature_point : feat_lmk_found_)
-        {
-            ftr = std::static_pointer_cast<FeaturePointImage>(feature_point);
-
-            cv::Point2f point = ftr->getKeypoint().pt;
-            cv::circle(_image, point, 2, cv::Scalar(255.0, 255.0, 0.0), -1, 8, 0);
-
-            cv::Point2f point2 = point;
-            point2.x = point2.x - 16;
-            cv::putText(_image, std::to_string(ftr->landmarkId()) + "/" + std::to_string((int)(100*ftr->getScore())), point2,
-                        cv:: FONT_HERSHEY_SIMPLEX, 0.4, cv::Scalar(255.0, 255.0, 0.0));
-        }
-    }
-}
-
-void ProcessorTrackerLandmarkImage::drawLandmarks(cv::Mat _image)
-{
-    if (last_ptr_!=nullptr)
-    {
-        unsigned int num_lmks_in_img = 0;
-
-        Eigen::VectorXd current_state = last_ptr_->getFrame()->getState().vector("PO");
-        SensorCameraPtr camera = std::static_pointer_cast<SensorCamera>(getSensor());
-
-        for (auto landmark_base_ptr : getProblem()->getMap()->getLandmarkList())
-        {
-            LandmarkAhpPtr landmark_ptr = std::static_pointer_cast<LandmarkAhp>(landmark_base_ptr);
-
-            Eigen::Vector4d point3d_hmg;
-            landmarkInCurrentCamera(current_state, landmark_ptr, point3d_hmg);
-
-            Eigen::Vector2d point2d = pinhole::projectPoint(camera->getIntrinsic()->getState(), // k
-                                                            camera->getDistortionVector(),          // d
-                                                            point3d_hmg.head(3));                   // v
-
-            if(pinhole::isInImage(point2d,image_.width_,image_.height_))
-            {
-                num_lmks_in_img++;
-
-                cv::Point2f point;
-                point.x = point2d[0];
-                point.y = point2d[1];
-
-                cv::circle(_image, point, 4, cv::Scalar(51.0, 51.0, 255.0), 1, 3, 0);
-                cv::putText(_image, std::to_string(landmark_ptr->id()), point, cv:: FONT_HERSHEY_SIMPLEX, 0.4, cv::Scalar(100.0, 100.0, 255.0) );
-            }
-        }
-        cv::Point label_for_landmark_point;
-        label_for_landmark_point.x = 3;
-        label_for_landmark_point.y = 10;
-        cv::putText(_image, std::to_string(landmarks_tracked_), label_for_landmark_point,
-                    cv:: FONT_HERSHEY_SIMPLEX, 0.4, cv::Scalar(255.0, 0.0, 255.0));
-
-        cv::Point label_for_landmark_point2;
-        label_for_landmark_point2.x = 3;
-        label_for_landmark_point2.y = 20;
-        cv::putText(_image, std::to_string(num_lmks_in_img), label_for_landmark_point2,
-                    cv:: FONT_HERSHEY_SIMPLEX, 0.4, cv::Scalar(255.0, 0.0, 255.0));
-
-//    std::cout << "\t\tTotal landmarks: " << counter << std::endl;
-    }
-}
-
-//namespace wolf{
-
-ProcessorBasePtr ProcessorTrackerLandmarkImage::create(const std::string& _unique_name, const ParamsProcessorBasePtr _params)
-{
-    ProcessorTrackerLandmarkImagePtr prc_ptr = std::make_shared<ProcessorTrackerLandmarkImage>(std::static_pointer_cast<ParamsProcessorTrackerLandmarkImage>(_params));
-    prc_ptr->setName(_unique_name);
-    return prc_ptr;
-}
-
-} // namespace wolf
-
-// Register in the FactorySensor
-#include "core/processor/factory_processor.h"
-namespace wolf {
-WOLF_REGISTER_PROCESSOR(ProcessorTrackerLandmarkImage)
-} // namespace wolf
-
diff --git a/src/processor/processor_visual_odometry.cpp b/src/processor/processor_visual_odometry.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b52a01825caf8d047e1dcc23747f5feaa471ccce
--- /dev/null
+++ b/src/processor/processor_visual_odometry.cpp
@@ -0,0 +1,738 @@
+//--------LICENSE_START--------
+//
+// Copyright (C) 2020,2021,2022 Institut de Robòtica i Informàtica Industrial, CSIC-UPC.
+// Authors: Joan Solà Ortega (jsola@iri.upc.edu)
+// All rights reserved.
+//
+// This file is part of WOLF
+// WOLF is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with this program.  If not, see <http://www.gnu.org/licenses/>.
+//
+//--------LICENSE_END--------
+
+
+//standard
+#include "vision/processor/processor_visual_odometry.h"
+
+#include <opencv2/imgproc.hpp>
+
+#include <chrono>
+#include <ctime>
+
+namespace wolf{
+
+ProcessorVisualOdometry::ProcessorVisualOdometry(ParamsProcessorVisualOdometryPtr _params_vo) :
+                ProcessorTracker("ProcessorVisualOdometry", "PO", 3, _params_vo),
+                params_visual_odometry_(_params_vo),
+                frame_count_(0)
+{
+    // Preprocessor stuff
+    // FAST detector used to populate the grid cells in preProcess
+    detector_ = cv::FastFeatureDetector::create(_params_vo->fast.threshold,
+                                                _params_vo->fast.non_max_suppresion,
+                                                cv::FastFeatureDetector::TYPE_9_16); // TYPE_5_8, TYPE_7_12, TYPE_9_16
+    
+    // Processor stuff
+    // Set pixel noise covariance: isotropic, std_pix in both u and v
+    Eigen::Vector2d std_pix; std_pix << params_visual_odometry_->std_pix, params_visual_odometry_->std_pix;
+    pixel_cov_ = std_pix.array().square().matrix().asDiagonal();
+
+}
+
+void ProcessorVisualOdometry::configure(SensorBasePtr _sensor)
+{
+	//Initialize camera sensor pointer
+	sen_cam_ = std::static_pointer_cast<SensorCamera>(_sensor);
+    Eigen::Matrix3d K = sen_cam_->getIntrinsicMatrix();
+    
+    // OpenCV copy of the intrinsic matrix, assuming zero skew (K(0,1) dropped).
+    // NOTE(review): stored in single precision (cv::Mat_<float>) while the
+    // Eigen matrix is double - narrowing; confirm this is acceptable.
+    Kcv_ = (cv::Mat_<float>(3,3) << K(0,0), 0, K(0,2),
+               0, K(1,1), K(1,2),
+               0, 0, 1);
+
+    
+    // Tessalation of the image: grid of cells used by the active search to
+    // spread detections across the image
+    cell_grid_ = ActiveSearchGrid(sen_cam_->getImgWidth(), sen_cam_->getImgHeight(),
+                                  params_visual_odometry_->grid.nbr_cells_h,
+                                  params_visual_odometry_->grid.nbr_cells_v,
+                                  params_visual_odometry_->grid.margin,
+                                  params_visual_odometry_->grid.separation);
+}
+
+TracksMap ProcessorVisualOdometry::mergeTracks(const TracksMap& tracks_prev_curr, const TracksMap& tracks_curr_next){
+    // Compose the two track maps: a prev->next track exists only when the
+    // intermediate keypoint of a prev->curr track is itself tracked curr->next.
+    TracksMap composed;
+    for (const auto & prev_curr : tracks_prev_curr){
+        auto it_next = tracks_curr_next.find(prev_curr.second);
+        if (it_next != tracks_curr_next.end()){
+            composed[prev_curr.first] = it_next->second;
+        }
+    }
+    return composed;
+}
+
+void ProcessorVisualOdometry::preProcess()
+{
+    // Detect/track keypoints between last and incoming images and store all
+    // results (keypoints and track maps) in the incoming capture, for later
+    // use by processKnown/processNew. Repopulates keypoints in last when the
+    // number of surviving origin tracks drops below the keyframe threshold.
+
+    auto t1 = std::chrono::system_clock::now();
+    
+    // Get Capture
+    capture_image_incoming_ = std::static_pointer_cast<CaptureImage>(incoming_ptr_);
+
+
+    cv::Mat img_incoming = capture_image_incoming_->getImage();
+
+
+    /* Equalize image for better detection and tracking
+     * available methods:
+     *      0. none
+     *      1. average
+     *      2. opencv: histogram_equalization
+     *      3. opencv: CLAHE
+     */
+    switch (params_visual_odometry_->equalization.method)
+    {
+        case 0:
+            break;
+        case 1:
+        {
+            // average to central brightness
+            auto img_avg = (cv::mean(img_incoming)).val[0];
+            img_incoming += cv::Scalar(round(params_visual_odometry_->equalization.average.median - img_avg) );
+            break;
+        }
+        case 2:
+        {
+            cv::equalizeHist( img_incoming, img_incoming );
+            break;
+        }
+        case 3:
+        {
+            // Contrast Limited Adaptive Histogram Equalization  CLAHE
+            // -> more continuous lighting and higher contrast images
+            cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE(params_visual_odometry_->equalization.clahe.clip_limit,
+                                                       params_visual_odometry_->equalization.clahe.tile_grid_size);
+            clahe->apply(img_incoming, img_incoming);
+            break;
+        }
+    }
+
+
+    // Time to PREPreprocess the image if necessary: greyscale if BGR, CLAHE etc...
+    // once preprocessing is done, replace the original image (?)
+
+    // if first image, compute keypoints, add to capture incoming and return
+    if (last_ptr_ == nullptr){
+
+        // detect one FAST keypoint in each cell of the grid
+        cv::Rect rect_roi;
+        Eigen::Vector2i cell_index;
+        std::vector<cv::KeyPoint> kps_roi;
+        for (int i=1; i < params_visual_odometry_->grid.nbr_cells_h-1; i++){
+            for (int j=1; j < params_visual_odometry_->grid.nbr_cells_v-1; j++){
+                cell_index << i,j;
+                cell_grid_.cell2roi(cell_index, rect_roi);
+
+                cv::Mat img_roi(img_incoming, rect_roi);  // no data copy -> no overhead
+                detector_->detect(img_roi, kps_roi);
+
+                if (kps_roi.size() > 0){
+                    // retain only the best keypoint in each region of interest
+                    retainBest(kps_roi, 1);
+                    // Keypoints are detected in the local coordinates of the region of interest
+                    // -> translate to the full image corner coordinate system
+                    kps_roi.at(0).pt.x = kps_roi.at(0).pt.x + rect_roi.x;
+                    kps_roi.at(0).pt.y = kps_roi.at(0).pt.y + rect_roi.y;
+                    capture_image_incoming_->addKeyPoint(kps_roi.at(0));
+                }
+            }
+        }
+        WOLF_DEBUG( "Initially detected " , capture_image_incoming_->getKeyPoints().size(), " keypoints in incoming" );
+
+        // Initialize the tracks data structure with a "dummy track" where the keypoint is pointing to itself
+        TracksMap tracks_init;
+        for (auto mwkp : capture_image_incoming_->getKeyPoints()){
+            tracks_init[mwkp.first] = mwkp.first;
+        }
+        capture_image_incoming_->setTracksOrigin(tracks_init);
+        capture_image_incoming_->setTracksPrev(tracks_init);
+
+        auto __attribute__((unused)) dt_preprocess = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now() - t1).count();
+        WOLF_DEBUG( "dt_preprocess (ms): " , dt_preprocess );
+
+        return;
+    }
+
+    ////////////////////////////////
+    // FEATURE TRACKING
+    // Update capture Incoming data
+    //   - Track keypoints last->incoming
+    //   - Merge tracks origin->last with last->incoming to get origin->incoming
+    //   - Outlier rejection origin->incoming (essential matrix)
+    //   - If too few keypoints in incoming, detect new keypoints and last and track them in last->incoming
+    //   - All the results are stored in incoming capture for later Tree Processing
+    ////////////////////////////////
+
+    capture_image_last_     = std::static_pointer_cast<CaptureImage>(last_ptr_);
+    capture_image_origin_   = std::static_pointer_cast<CaptureImage>(origin_ptr_);
+    cv::Mat img_last        = capture_image_last_->getImage();
+
+    KeyPointsMap mwkps_origin   = capture_image_origin_ ->getKeyPoints();
+    KeyPointsMap mwkps_last     = capture_image_last_   ->getKeyPoints();
+    KeyPointsMap mwkps_incoming;  // init incoming
+
+
+
+    WOLF_DEBUG( "Tracking " , mwkps_last.size(), " keypoints in last" );
+
+    // TracksMap between last and incoming
+    // Input: ID of Wkp in last. Output: ID of the tracked Wkp in incoming.
+    TracksMap tracks_last_incoming = kltTrack(img_last, img_incoming, mwkps_last, mwkps_incoming);
+
+//    WOLF_DEBUG( "Tracked " , mwkps_incoming.size(), " keypoints to incoming" );
+
+    // TracksMap between origin and last
+    // Input: ID of Wkp in origin. Output: ID of the tracked Wkp in last.
+    TracksMap tracks_origin_last = capture_image_last_->getTracksOrigin();
+
+    // Merge tracks to get TracksMap between origin and incoming
+    // Input: ID of Wkp in origin. Output: ID of the tracked Wkp in incoming.
+    TracksMap tracks_origin_incoming = mergeTracks(tracks_origin_last, tracks_last_incoming);
+
+//    WOLF_DEBUG( "Merged " , tracks_last_incoming.size(), " tracks..." );
+
+    // Outliers rejection with essential matrix
+    cv::Mat E;
+    filterWithEssential(mwkps_origin, mwkps_incoming, tracks_origin_incoming, E);
+
+    // Edit tracks prev with only inliers wrt origin
+    // and remove also from mwkps_incoming all the keypoints that have not been tracked
+    TracksMap tracks_last_incoming_filtered;
+    KeyPointsMap mwkps_incoming_filtered;
+    for (auto & track_origin_incoming : tracks_origin_incoming){
+        for (auto & track_last_incoming : tracks_last_incoming){
+            if (track_origin_incoming.second == track_last_incoming.second){
+                tracks_last_incoming_filtered[track_last_incoming.first] = track_last_incoming.second;
+                mwkps_incoming_filtered[track_last_incoming.second] = mwkps_incoming.at(track_last_incoming.second);
+                // FIX(review): each incoming keypoint id is the endpoint of at
+                // most one last->incoming track (kltTrack creates a fresh id
+                // per track), so once matched we can stop scanning. The
+                // original `continue` merely resumed the inner loop, wasting
+                // the remaining iterations.
+                break;
+            }
+        }
+    }
+    WOLF_DEBUG( "Tracked " , mwkps_incoming_filtered.size(), " inliers in incoming" );
+
+
+    ////////////////////////////////
+    // if too few tracks left in incoming
+    // detect new KeyPoints in last and track them to incoming
+    ////////////////////////////////
+    size_t n_tracks_origin = tracks_origin_incoming.size();
+
+//    WOLF_DEBUG("# of tracks: ", n_tracks_origin, "; min # of tracks: ", params_visual_odometry_->min_features_for_keyframe);
+
+    if (n_tracks_origin < params_visual_odometry_->min_features_for_keyframe)
+    {
+
+//        WOLF_DEBUG( "Too Few Tracks. Detecting more keypoints in last" );
+
+        // Erase all keypoints previously added to the cell grid
+        cell_grid_.renew();
+
+        // Add last Keypoints that still form valid tracks between last and incoming
+        // And create a filtered map for last keypoints
+        KeyPointsMap mwkps_last_filtered;
+        for (auto track: tracks_last_incoming_filtered){
+            mwkps_last_filtered[track.first] = mwkps_last[track.first];
+            size_t last_kp_id = track.first;
+            cell_grid_.hitCell(capture_image_last_->getKeyPoints().at(last_kp_id).getCvKeyPoint());
+        }
+
+
+        // Detect new KeyPoints 
+        std::vector<cv::KeyPoint> kps_last_new;
+        
+        // Use the grid to detect new keypoints in empty cells
+        // We try a bunch of times to add keypoints to randomly selected empty regions of interest
+        for (int i=0; i < params_visual_odometry_->max_new_features; i++){
+            cv::Rect rect_roi;
+
+            bool is_empty = cell_grid_.pickRoi(rect_roi);
+            if (!is_empty) // no empty cells!
+            {
+                break;
+            }
+            cv::Mat img_roi(img_last, rect_roi);  // no data copy -> no overhead
+            std::vector<cv::KeyPoint> kps_roi;
+            detector_->detect(img_roi, kps_roi);
+            if (kps_roi.size() > 0){
+                // retain only the best keypoint in each region of interest
+                retainBest(kps_roi, 1);
+
+                // Keypoints are detected in the local coordinates of the region of interest
+                // -> translate to the full image corner coordinate system
+                kps_roi.at(0).pt.x = kps_roi.at(0).pt.x + rect_roi.x;
+                kps_roi.at(0).pt.y = kps_roi.at(0).pt.y + rect_roi.y;
+                kps_last_new.push_back(kps_roi.at(0));
+
+                // update grid with this detection
+                cell_grid_.hitCell(kps_roi.at(0));
+            }
+            else
+            {
+                // block this grid's cell so that it is not reused for detection
+                cell_grid_.blockCell(rect_roi);
+            }
+        }
+
+//        WOLF_DEBUG("Detected ", kps_last_new.size(), " new raw keypoints");
+        
+        // Create a map of wolf KeyPoints to track only the new ones
+        KeyPointsMap mwkps_last_new, mwkps_incoming_new;
+        for (auto & cvkp : kps_last_new){
+            WKeyPoint wkp(cvkp);
+            mwkps_last_new[wkp.getId()] = wkp;
+        } 
+        WOLF_DEBUG("Detected ", mwkps_last_new.size(), " new keypoints in last");
+
+        TracksMap tracks_last_incoming_new = kltTrack(img_last, img_incoming, mwkps_last_new, mwkps_incoming_new);
+
+        WOLF_DEBUG("Tracked ", mwkps_incoming_new.size(), " inliers in incoming");
+
+//        WOLF_DEBUG("Tracked ", mwkps_incoming_new.size(), " new keypoints to incoming");
+
+        // Concatenation of old tracks and new tracks
+        for (auto & track: tracks_last_incoming_new){
+            tracks_last_incoming_filtered[track.first] = track.second;
+            mwkps_last_filtered[track.first] = mwkps_last_new[track.first];
+            mwkps_incoming_filtered[track.second] = mwkps_incoming_new[track.second];
+        }
+
+        // Outliers rejection with essential matrix
+        // tracks that are not geometrically consistent are removed from tracks_last_incoming_new
+        filterWithEssential(mwkps_last_filtered, mwkps_incoming_filtered, tracks_last_incoming_filtered, E);
+
+        WOLF_DEBUG("New total : ", n_tracks_origin, " + ", mwkps_incoming_new.size(), " = ", tracks_last_incoming_filtered.size(), " tracks");
+
+        // add a flag so that voteForKeyFrame use it to vote for a KeyFrame 
+        capture_image_incoming_->setLastWasRepopulated(true);
+
+        // Update captures
+        capture_image_last_->addKeyPoints(mwkps_last_new);
+
+    }
+    else
+    {
+        WOLF_DEBUG("\n\n");
+    }
+
+    // Update captures
+    capture_image_incoming_->addKeyPoints(mwkps_incoming_filtered);
+    capture_image_incoming_->setTracksPrev(tracks_last_incoming_filtered);
+    capture_image_incoming_->setTracksOrigin(tracks_origin_incoming);
+
+    auto __attribute__((unused)) dt_preprocess = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now() - t1).count();
+    WOLF_DEBUG( "dt_preprocess (ms): " , dt_preprocess );
+
+    return;
+}
+
+
+unsigned int ProcessorVisualOdometry::processKnown()
+{
+    // Extend the track matrix using the last->incoming tracks computed in
+    // preProcess and stored in the incoming capture: for each feature of the
+    // last snapshot whose keypoint is still tracked in incoming, emplace the
+    // corresponding incoming feature and append it to the existing track.
+    // Returns the number of successfully continued tracks.
+
+    auto t1 = std::chrono::system_clock::now();
+    // Extend the process track matrix by using information stored in the incoming capture
+
+    // Get tracks present at the last capture time (should be the most recent snapshot at this moment)
+    std::list<FeatureBasePtr> tracks_snapshot_last = track_matrix_.snapshotAsList(last_ptr_);
+
+    // last->incoming track map (kp id in last -> kp id in incoming)
+    // FIX(review): the original redeclared this same map inside the loop,
+    // shadowing this one and re-copying it on every iteration.
+    TracksMap tracks_map_li = capture_image_incoming_->getTracksPrev();
+    for (auto feature_tracked_last: tracks_snapshot_last){
+        // check if the keypoint in the last capture is in the last->incoming TracksMap stored in the incoming capture
+        // NOTE(review): features in this snapshot are assumed to be
+        // FeaturePointImage; the dynamic cast result is not null-checked.
+        FeaturePointImagePtr feat_pi_last = std::dynamic_pointer_cast<FeaturePointImage>(feature_tracked_last);
+        size_t id_feat_last = feat_pi_last->getKeyPoint().getId();  
+
+        // if this feature id is in the last->incoming tracks of capture incoming, the track is continued
+        // otherwise we store the pair as a newly detected track (for processNew)
+        if (tracks_map_li.count(id_feat_last)){
+            // WOLF_DEBUG("A corresponding track has been found for id_feat_last ", id_feat_last );
+            auto kp_track_li = tracks_map_li.find(id_feat_last);
+            // create a feature using the corresponding WKeyPoint contained in incoming (hence the "second")
+            auto feat_inco = FeatureBase::emplace<FeaturePointImage>(
+                                                    capture_image_incoming_, 
+                                                    capture_image_incoming_->getKeyPoints().at(kp_track_li->second), 
+                                                    pixel_cov_);
+            track_matrix_.add(feat_pi_last->trackId(), feat_inco);
+
+            // book-keeping for processNew: remember which last->incoming tracks
+            // were matched, so that only the remaining ones spawn new tracks
+            tracks_map_li_matched_.insert({kp_track_li->first, kp_track_li->second});
+        }
+    }
+    auto __attribute__((unused)) dt_processKnown = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now() - t1).count();
+    WOLF_DEBUG( "dt_processKnown (ms): " , dt_processKnown );
+
+    // return number of successful tracks until incoming
+    return tracks_map_li_matched_.size();
+}
+
+
+unsigned int ProcessorVisualOdometry::processNew(const int& _max_features)
+{
+    // Create new tracks in the track matrix for every last->incoming track
+    // that was NOT continued in processKnown. Returns the number of tracks
+    // created.
+    // NOTE(review): _max_features is not used in this body; the number of new
+    // detections is bounded earlier, in preProcess - confirm this is intended.
+    auto t1 = std::chrono::system_clock::now();
+
+    // A new keyframe was detected:
+    // last_ptr_ is going to become origin_ptr and
+    // incoming_ptr_ is going to become last_ptr
+    // So we need to reset the origin tracks of incoming used in preProcess so that they correspond to the future origin (currently last)  
+    capture_image_incoming_->setTracksOrigin(capture_image_incoming_->getTracksPrev());
+
+    // We have matched the tracks in the track matrix with the last->incoming tracks 
+    // stored in the TracksMap from getTracksPrev()
+    // Now we need to add new tracks in the track matrix for the NEW tracks.
+    //
+    // Use book-keeping prepared in processKnown: the TracksMap that have been matched were stored in tracks_map_li_matched_
+    // and here add tracks only for those that have not been matched
+
+    unsigned int counter_new_tracks = 0;
+    for (std::pair<size_t,size_t> track_li: capture_image_incoming_->getTracksPrev()){
+        // if track not matched, then create a new track in the track matrix etc.
+        if (!tracks_map_li_matched_.count(track_li.first)){
+            // create a new last feature, a new track using this last feature and add the incoming feature to this track
+            WKeyPoint kp_last = capture_image_last_->getKeyPoints().at(track_li.first);
+            WKeyPoint kp_inco = capture_image_incoming_->getKeyPoints().at(track_li.second);
+            FeaturePointImagePtr feat_pi_last = FeatureBase::emplace<FeaturePointImage>(capture_image_last_, kp_last, pixel_cov_);
+            FeaturePointImagePtr feat_pi_inco = FeatureBase::emplace<FeaturePointImage>(capture_image_incoming_, kp_inco, pixel_cov_);
+            track_matrix_.newTrack(feat_pi_last);
+            track_matrix_.add(feat_pi_last->trackId(), feat_pi_inco);
+            counter_new_tracks++;
+        }
+    }
+
+    auto __attribute__((unused)) dt_processNew = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now() - t1).count();
+    WOLF_DEBUG( "dt_processNew (ms): " , dt_processNew );
+
+    return counter_new_tracks;
+}
+
+
+
+
+
+void ProcessorVisualOdometry::establishFactors()
+{
+    // Function only called when KF is created using last
+    // Loop over the snapshot in corresponding to last capture. Does 2 things:
+    //     1) for tracks already associated to a landmark, create a KF-Lmk factor between the last KF and the landmark.
+    //     2) if the feature track is not associated to a landmark yet and is long enough, create a new landmark
+    //        using triangulation as a prior, using previous KF current estimates. Create a KF-Lmk factor for all these KFs. 
+    //        For bookkeeping, define the landmark id as the track id.
+
+    std::list<FeatureBasePtr> tracks_snapshot_last = track_matrix_.snapshotAsList(last_ptr_);
+
+    if(tracks_snapshot_last.empty())
+    {
+        WOLF_WARN("Trying to establish factors but no features exist in last Capture!");
+        return;
+    }
+
+    for (auto feat: tracks_snapshot_last)
+    {
+        auto feat_pi = std::static_pointer_cast<FeaturePointImage>(feat);
+
+        // verify if a landmark is associated to this track (BAD implementation)
+        // NOTE(review): linear scan of the whole landmark list for every
+        // feature -> O(tracks x landmarks); a map from track id to landmark
+        // would avoid this.
+        LandmarkBasePtr associated_lmk = nullptr;
+        for (auto lmk: getProblem()->getMap()->getLandmarkList())
+        {
+            if (lmk->id() == feat_pi->trackId()){
+                associated_lmk = lmk;
+            }
+        }
+
+        // 1) create a factor between new KF and assocatiated track landmark
+        //    HYP: assuming the trackid are the same as the landmark ID -> BAD if other types of landmarks involved
+        if (associated_lmk)
+        {
+            // NOTE(review): cast result is not null-checked; a landmark with a
+            // matching id but a different type would yield nullptr here.
+            LandmarkHpPtr associated_lmk_hp = std::dynamic_pointer_cast<LandmarkHp>(associated_lmk);
+            FactorBase::emplace<FactorPixelHp>(feat_pi,
+                                               feat_pi,
+                                               associated_lmk_hp,
+                                               shared_from_this(),
+                                               params_visual_odometry_->apply_loss_function);
+        }
+
+        // 2) create landmark if track is not associated with one and has enough length
+        else if(track_matrix_.trackSize(feat->trackId()) >= params_visual_odometry_->min_track_length_for_landmark)
+        {
+            // std::cout << "NEW valid track \\o/" << std::endl;
+            LandmarkBasePtr lmk = emplaceLandmark(feat_pi);
+            lmk->setId(feat_pi->trackId());
+
+            // Add factors from all KFs of this track to the new lmk
+            Track track_kf = track_matrix_.trackAtKeyframes(feat->trackId());
+            for (auto feat_kf: track_kf)
+            {
+                // cast is loop-invariant; could be hoisted out of the loop
+                LandmarkHpPtr lmk_hp = std::dynamic_pointer_cast<LandmarkHp>(lmk);
+                FactorBase::emplace<FactorPixelHp>(feat_kf.second,
+                                                   feat_kf.second,
+                                                   lmk_hp, shared_from_this(),
+                                                   params_visual_odometry_->apply_loss_function);
+            }
+        }
+    }
+
+    return;
+}
+
+LandmarkBasePtr ProcessorVisualOdometry::emplaceLandmark(FeatureBasePtr _feat)
+{
+    // Taken from processor_bundle_adjustment
+    // Initialize the landmark in its ray (based on pixel meas) and using a arbitrary distance
+    // Returns the emplaced homogeneous-point landmark, with its id (and the
+    // feature's landmark id) set to the feature's track id.
+
+    FeaturePointImagePtr feat_pi = std::static_pointer_cast<FeaturePointImage>(_feat);
+    Eigen::Vector2d point2d = _feat->getMeasurement();
+
+    // back-project the pixel measurement to a 3d ray in the camera frame,
+    // undistorting with the sensor's correction model
+    Eigen::Vector3d point3d;
+    point3d = pinhole::backprojectPoint(
+            getSensor()->getIntrinsic()->getState(),
+            (std::static_pointer_cast<SensorCamera>(getSensor()))->getCorrectionVector(),
+            point2d);
+
+    // double distance = params_bundle_adjustment_->distance; // arbitrary value
+    double distance = 1;
+    // homogeneous point in camera frame: the 4th component encodes the
+    // inverse of the assumed distance along the back-projected ray
+    Eigen::Vector4d vec_homogeneous_c;
+    vec_homogeneous_c = {point3d(0),point3d(1),point3d(2),point3d.norm()/distance};
+
+    // lmk from camera to world coordinate frame.
+    // NOTE(review): Eigen types (Transform, Translation, Quaterniond) appear
+    // unqualified - presumably via a using-directive in a header; confirm.
+    Transform<double,3,Isometry> T_w_r
+        = Translation<double,3>(feat_pi->getFrame()->getP()->getState())
+        * Quaterniond(_feat->getFrame()->getO()->getState().data());
+    Transform<double,3,Isometry> T_r_c
+		= Translation<double,3>(_feat->getCapture()->getSensorP()->getState())
+        * Quaterniond(_feat->getCapture()->getSensorO()->getState().data());
+    Eigen::Matrix<double, 4, 1> vec_homogeneous_w = T_w_r
+                                           * T_r_c
+                                           * vec_homogeneous_c;
+
+    // normalize to make equivalent to a unit quaternion
+    vec_homogeneous_w.normalize();
+
+    auto lmk_hp_ptr = LandmarkBase::emplace<LandmarkHp>(getProblem()->getMap(), 
+                                                        vec_homogeneous_w, 
+                                                        feat_pi->getKeyPoint().getDescriptor());
+
+    // Set all IDs equal to track ID
+    size_t track_id = _feat->trackId();
+    lmk_hp_ptr->setId(track_id);
+    _feat->setLandmarkId(track_id);
+
+    return lmk_hp_ptr;
+}
+
+
+void ProcessorVisualOdometry::postProcess()
+{
+    // End of each process() cycle: prune tracks with no keyframe support and
+    // print simple console statistics.
+    frame_count_ ++;
+
+    // delete tracks with no keyframes
+    // NOTE(review): assumes getTracks() returns a reference to a node-based
+    // container, so removing the current id after incrementing the iterator
+    // is safe - confirm.
+    for (auto track_it = track_matrix_.getTracks().begin(); track_it != track_matrix_.getTracks().end(); /* do not increment iterator yet... */)
+    {
+        auto track_id = track_it->first;
+        if (track_matrix_.trackAtKeyframes(track_id).empty())
+        {
+            ++track_it;                      // ... increment iterator **before** erasing the element!!!
+            track_matrix_.remove(track_id);
+        }
+        else
+            ++track_it;
+    }
+
+    // print a bar with the number of active tracks in incoming
+    unsigned int n = capture_image_incoming_->getKeyPoints().size();
+    std::string s(n/2, '#');
+    WOLF_INFO("TRACKS: ", n, " ", s);
+
+    // and a bar with the number of landmarks in the map
+    n = getProblem()->getMap()->getLandmarkList().size();
+    s = std::string(n/2, '-');
+    WOLF_INFO("LMARKS: ", n, " ", s);
+
+
+}
+
+bool ProcessorVisualOdometry::voteForKeyFrame() const
+{
+    // If the last capture was repopulated in preProcess, the number of tracks
+    // fell below a threshold in the current incoming track: the processor must
+    // create a new Keyframe whatever happens.
+    CaptureImagePtr capture_image_incoming = std::dynamic_pointer_cast<CaptureImage>(incoming_ptr_);
+    if (capture_image_incoming->getLastWasRepopulated())
+        return true;
+
+    // Otherwise, a simple vote based on the incoming feature count; could be
+    // changed to something that takes into account number of tracks alive,
+    // parallax, etc.
+    return incoming_ptr_->getFeatureList().size() < params_visual_odometry_->min_features_for_keyframe;
+}
+
+
+void ProcessorVisualOdometry::advanceDerived()
+{
+    // reinitilize the bookeeping to communicate info from processKnown to processNew
+    // (per-cycle state only; the track matrix itself is untouched)
+    tracks_map_li_matched_.clear();
+}
+
+void ProcessorVisualOdometry::resetDerived()
+{
+    // reinitilize the bookeeping to communicate info from processKnown to processNew
+    // (per-cycle state only; the track matrix itself is untouched)
+    tracks_map_li_matched_.clear();
+}
+
+
+
+void ProcessorVisualOdometry::setParams(const ParamsProcessorVisualOdometryPtr _params)
+{
+    // Replace the parameter struct (shared-pointer swap; no validation, and
+    // objects built from the old params, e.g. the FAST detector, are not rebuilt)
+    params_visual_odometry_ = _params;
+}
+
+TracksMap ProcessorVisualOdometry::kltTrack(const cv::Mat _img_prev, const cv::Mat _img_curr, const KeyPointsMap &_mwkps_prev, KeyPointsMap &_mwkps_curr)
+{
+    // Track the keypoints of _mwkps_prev from _img_prev to _img_curr with
+    // pyramidal Lucas-Kanade optical flow, running the flow in both directions
+    // and rejecting tracks that fail either pass. Surviving keypoints are
+    // inserted in _mwkps_curr (output); the returned TracksMap maps previous
+    // keypoint ids to the new current ids.
+    // (cv::Mat is passed by value, but only the header is copied: pixel data is shared)
+    if (_mwkps_prev.empty()) return TracksMap();
+
+    TracksMap tracks_prev_curr;
+
+    // Create cv point list for tracking, we initialize optical flow with previous keypoints
+    // We also need a list of indices to build the track map
+    std::vector<cv::Point2f> p2f_prev;
+    std::vector<size_t> indices_prev;
+    for (auto & wkp : _mwkps_prev){
+        p2f_prev.push_back(wkp.second.getCvKeyPoint().pt);
+        indices_prev.push_back(wkp.first);
+    }
+    std::vector<cv::Point2f> p2f_curr = p2f_prev;
+
+    // Configure and process KLT optical flow research
+    std::vector<uchar> status;
+    std::vector<float> err;
+
+
+
+    // Process one way: previous->current with current init with previous
+    ParamsProcessorVisualOdometry::KltParams prms = params_visual_odometry_->klt;
+    cv::calcOpticalFlowPyrLK(
+            _img_prev,
+            _img_curr, 
+            p2f_prev,
+            p2f_curr,
+            status, err,
+            {prms.patch_width, prms.patch_height},
+            prms.nlevels_pyramids,
+            prms.criteria,
+            (cv::OPTFLOW_USE_INITIAL_FLOW + cv::OPTFLOW_LK_GET_MIN_EIGENVALS));
+    
+    // Process the other way: current->previous (backward consistency pass)
+    std::vector<uchar> status_back;
+    std::vector<float> err_back;
+    cv::calcOpticalFlowPyrLK(
+            _img_curr,
+            _img_prev,
+            p2f_curr,
+            p2f_prev,
+            status_back, err_back,
+            {prms.patch_width, prms.patch_height},
+            prms.nlevels_pyramids,
+            prms.criteria,
+            (cv::OPTFLOW_USE_INITIAL_FLOW + cv::OPTFLOW_LK_GET_MIN_EIGENVALS));
+
+    // Delete point if KLT failed
+    // NOTE(review): with OPTFLOW_LK_GET_MIN_EIGENVALS, `err` holds the minimum
+    // eigenvalue of the gradient matrix (larger usually means better texture),
+    // not an intensity residual; rejecting err > max_err may therefore be
+    // inverted - confirm the intended error measure (dropping the flag yields
+    // the patch intensity error, which matches a "max_err" threshold).
+    for (size_t j = 0; j < status.size(); j++) {
+
+        if(!status_back.at(j)  ||  (err_back.at(j) > prms.max_err) ||
+           !status.at(j)  ||  (err.at(j) > prms.max_err)) {
+            continue;
+        }
+
+        // We keep the initial point and add the tracked point
+        WKeyPoint wkp(cv::KeyPoint(p2f_curr.at(j), 1));
+        _mwkps_curr[wkp.getId()] = wkp;
+
+        // Update the map
+        tracks_prev_curr[indices_prev.at(j)] = wkp.getId();
+
+        // Other checks? Distance between track points?
+    }
+
+    return tracks_prev_curr;
+}
+
+bool ProcessorVisualOdometry::filterWithEssential(const KeyPointsMap _mwkps_prev, const KeyPointsMap _mwkps_curr, TracksMap &_tracks_prev_curr, cv::Mat &_E)
+{
+    // Outlier rejection: estimate the essential matrix with RANSAC over the
+    // tracked pairs and erase from _tracks_prev_curr the tracks flagged as
+    // outliers. Returns false when there are too few tracks to estimate.
+    ParamsProcessorVisualOdometry::RansacParams prms = params_visual_odometry_->ransac;
+
+    // We need to build lists of pt2d for openCV function
+    std::vector<cv::Point2d> p2d_prev, p2d_curr;
+    std::vector<size_t> all_indices;
+    for (auto & track : _tracks_prev_curr){
+        all_indices.push_back(track.first);
+        // depixellize: points are converted to normalized image coordinates
+        Eigen::Vector2d ray_prev = pinhole::depixellizePoint(sen_cam_->getPinholeModel(), _mwkps_prev.at(track.first).getEigenKeyPoint());
+        Eigen::Vector2d ray_curr = pinhole::depixellizePoint(sen_cam_->getPinholeModel(), _mwkps_curr.at(track.second).getEigenKeyPoint());
+        p2d_prev.push_back(cv::Point2d(ray_prev.x(), ray_prev.y()));
+        p2d_curr.push_back(cv::Point2d(ray_curr.x(), ray_curr.y()));
+    }
+
+    // We need at least five tracks (5-point algorithm)
+    if (p2d_prev.size() < 5) return false;
+
+    cv::Mat cvMask;
+    // FIX(review): the points above are already in normalized coordinates, so
+    // the camera matrix passed to findEssentialMat must be the identity - the
+    // original passed Kcv_, applying the intrinsics a second time while this
+    // identity K sat unused.
+    cv::Mat K = cv::Mat::eye(3,3,CV_32F);
+    // The RANSAC threshold in the params is expressed in pixels; divide by the
+    // mean focal length to convert it to normalized-coordinate units.
+    double focal = (sen_cam_->getIntrinsicMatrix()(0,0) +
+                   sen_cam_->getIntrinsicMatrix()(1,1)) / 2;
+
+    _E = cv::findEssentialMat(p2d_prev, 
+                              p2d_curr, 
+                              K, 
+                              cv::RANSAC,
+                              prms.prob,
+                              prms.thresh / focal,
+                              cvMask);
+    
+    // Let's remove outliers from tracksMap.
+    // FIX(review): the RANSAC mask is CV_8U (one byte per sample); read it as
+    // uchar - at<bool>() relies on sizeof(bool)==1 and is type-unsafe.
+    for (size_t k=0; k<all_indices.size(); k++){
+        if (cvMask.at<uchar>(k) == 0){
+            _tracks_prev_curr.erase(all_indices.at(k));
+        }
+    }
+
+    return true;
+}
+
+void ProcessorVisualOdometry::retainBest(std::vector<cv::KeyPoint> &_keypoints, int n)
+{
+    // Keep only the n keypoints with the strongest detector response
+    // (partial selection, O(N) on average - no full sort needed).
+    // FIX(review): guard n explicitly; the original compared size() (size_t)
+    // with n (int), so a non-positive n wrapped to a huge unsigned value and
+    // silently kept every keypoint.
+    if (n <= 0) {
+        _keypoints.clear();
+        return;
+    }
+    if (_keypoints.size() > static_cast<size_t>(n)) {
+        std::nth_element(_keypoints.begin(), _keypoints.begin() + n, _keypoints.end(),
+            [](const cv::KeyPoint& a, const cv::KeyPoint& b) { return a.response > b.response; });
+        _keypoints.resize(n);
+    }
+}
+
+} //namespace wolf
+
+// Register in the FactoryProcessor
+#include "core/processor/factory_processor.h"
+namespace wolf {
+// NOTE(review): registered in both the named factory and the AUTO factory -
+// confirm both registrations are intended.
+WOLF_REGISTER_PROCESSOR(ProcessorVisualOdometry)
+WOLF_REGISTER_PROCESSOR_AUTO(ProcessorVisualOdometry)
+} // namespace wolf
+
diff --git a/src/sensor/sensor_camera.cpp b/src/sensor/sensor_camera.cpp
index 7870993982db5164dfac99f4982a89c2021a3625..311e2212bfb260646092b8ae9a48fb6f6d716c50 100644
--- a/src/sensor/sensor_camera.cpp
+++ b/src/sensor/sensor_camera.cpp
@@ -30,17 +30,24 @@ namespace wolf
 {
 
 SensorCamera::SensorCamera(const Eigen::VectorXd& _extrinsics, const ParamsSensorCamera& _intrinsics) :
-                SensorBase("SensorCamera", std::make_shared<StateBlock>(_extrinsics.head(3), true), std::make_shared<StateQuaternion>(_extrinsics.tail(4), true), std::make_shared<StateBlock>(_intrinsics.pinhole_model_raw, true), 1),
+                SensorBase("SensorCamera", 
+                           std::make_shared<StateBlock>(_extrinsics.head(3), true), 
+                           std::make_shared<StateQuaternion>(_extrinsics.tail(4), true), 
+                           std::make_shared<StateBlock>(_intrinsics.pinhole_model_raw, true), 
+                           1),
                 img_width_(_intrinsics.width), //
                 img_height_(_intrinsics.height), //
                 distortion_(_intrinsics.distortion), //
                 correction_(distortion_.size()==0 ? 0 : distortion_.size() + 1), // make correction vector slightly larger in size than the distortion vector
                 pinhole_model_raw_(_intrinsics.pinhole_model_raw), //
                 pinhole_model_rectified_(_intrinsics.pinhole_model_rectified), //
-                using_raw_(true)
+                using_raw_(_intrinsics.using_raw)
 {
     assert(_extrinsics.size() == 7 && "Wrong intrinsics vector size. Should be 7 for 3d");
-    useRawImages();
+    if (using_raw_)
+        useRawImages();
+    else
+        useRectifiedImages();
     pinhole::computeCorrectionModel(getIntrinsic()->getState(), distortion_, correction_);
 }
 
@@ -55,7 +62,15 @@ SensorCamera::~SensorCamera()
     //
 }
 
-Eigen::Matrix3d SensorCamera::setIntrinsicMatrix(Eigen::Vector4d _pinhole_model)
+Eigen::Matrix<double, 3, 4> SensorCamera::getProjectionMatrix() const
+{
+    Eigen::Matrix<double, 3, 4> P;
+    P.setZero();
+    P.leftCols(3) = getIntrinsicMatrix();
+    return P;
+}
+
+Eigen::Matrix3d SensorCamera::computeIntrinsicMatrix(const Eigen::Vector4d& _pinhole_model) const
 {
     Eigen::Matrix3d K;
     K(0, 0) = _pinhole_model(2);
diff --git a/src/yaml/processor_image_yaml.cpp b/src/yaml/processor_image_yaml.cpp
deleted file mode 100644
index 6628e3ad7449c02d76700772e1e2e14d892ed7d7..0000000000000000000000000000000000000000
--- a/src/yaml/processor_image_yaml.cpp
+++ /dev/null
@@ -1,91 +0,0 @@
-//--------LICENSE_START--------
-//
-// Copyright (C) 2020,2021,2022 Institut de Robòtica i Informàtica Industrial, CSIC-UPC.
-// Authors: Joan Solà Ortega (jsola@iri.upc.edu)
-// All rights reserved.
-//
-// This file is part of WOLF
-// WOLF is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with this program.  If not, see <http://www.gnu.org/licenses/>.
-//
-//--------LICENSE_END--------
-/**
- * \file processor_image_yaml.cpp
- *
- *  Created on: May 21, 2016
- *      \author: jsola
- */
-
-#include "vision/internal/config.h"
-#include "vision/processor/processor_params_image.h"
-
-// wolf yaml
-#include "core/yaml/yaml_conversion.h"
-
-// wolf
-#include "core/common/factory.h"
-
-// yaml-cpp library
-#include <yaml-cpp/yaml.h>
-
-
-namespace wolf
-{
-namespace
-{
-static ParamsProcessorBasePtr createParamsProcessorImage(const std::string & _filename_dot_yaml)
-{
-    using std::string;
-    using YAML::Node;
-
-    std::shared_ptr<ParamsProcessorTrackerFeatureImage> p = std::make_shared<ParamsProcessorTrackerFeatureImage>();
-
-    Node params = YAML::LoadFile(_filename_dot_yaml);
-
-    if (!params.IsNull())
-    {
-    	Node dd_yaml = params["vision_utils"];
-   	    p->yaml_file_params_vision_utils = dd_yaml["YAML file params"].as<std::string>();
-   	    // Check if relative path
-   	    if (!p->yaml_file_params_vision_utils.empty())
-   	    {
-   	        if (p->yaml_file_params_vision_utils[0] != '/')
-   	        {
-   	            std::string wolf_root = _WOLF_VISION_ROOT_DIR;
-   	            std::cout << "Wolf root: " << wolf_root << std::endl;
-   	            std::string abs_path = wolf_root + "/demos/" + p->yaml_file_params_vision_utils;
-   	            p->yaml_file_params_vision_utils = abs_path;
-   	        }
-   	    }
-
-        Node alg = params["algorithm"];
-        p->max_new_features = alg["maximum new features"].as<unsigned int>();
-        p->min_features_for_keyframe = alg["minimum features for new keyframe"].as<unsigned int>();
-        p->min_response_for_new_features = alg["minimum response for new features"].as<float>();
-        p->time_tolerance= alg["time tolerance"].as<double>();
-        p->distance= alg["distance"].as<double>();
-
-        Node noi = params["noise"];
-        p->pixel_noise_std = noi["pixel noise std"].as<double>();
-        p->pixel_noise_var = p->pixel_noise_std * p->pixel_noise_std;
-    }
-
-    return p;
-}
-
-// Register in the FactorySensor
-const bool WOLF_UNUSED registered_prc_image_feature_par = FactoryParamsProcessor::registerCreator("ProcessorTrackerFeatureImage", createParamsProcessorImage);
-const bool WOLF_UNUSED registered_prc_image_landmark_par = FactoryParamsProcessor::registerCreator("ProcessorTrackerLandmarkImage", createParamsProcessorImage);
-
-}
-}
diff --git a/src/yaml/processor_tracker_feature_trifocal_yaml.cpp b/src/yaml/processor_tracker_feature_trifocal_yaml.cpp
deleted file mode 100644
index 5be8a61390dc567e3d416e874dde7e6f7384146a..0000000000000000000000000000000000000000
--- a/src/yaml/processor_tracker_feature_trifocal_yaml.cpp
+++ /dev/null
@@ -1,95 +0,0 @@
-//--------LICENSE_START--------
-//
-// Copyright (C) 2020,2021,2022 Institut de Robòtica i Informàtica Industrial, CSIC-UPC.
-// Authors: Joan Solà Ortega (jsola@iri.upc.edu)
-// All rights reserved.
-//
-// This file is part of WOLF
-// WOLF is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with this program.  If not, see <http://www.gnu.org/licenses/>.
-//
-//--------LICENSE_END--------
-/*
- * processor_tracker_feature_trifocal_yaml.cpp
- *
- *  Created on: Apr 12, 2018
- *      Author: asantamaria
- */
-
-// wolf
-#include "vision/processor/processor_tracker_feature_trifocal.h"
-
-#include "core/yaml/yaml_conversion.h"
-#include "core/common/factory.h"
-
-// yaml-cpp library
-#include <yaml-cpp/yaml.h>
-
-namespace wolf
-{
-
-namespace
-{
-static ParamsProcessorBasePtr createParamsProcessorTrackerFeatureTrifocal(const std::string & _filename_dot_yaml)
-{
-    YAML::Node config = YAML::LoadFile(_filename_dot_yaml);
-
-    if (config.IsNull())
-    {
-        WOLF_ERROR("Invalid YAML file!");
-        return nullptr;
-    }
-    else if (config["type"].as<std::string>() == "ProcessorTrackerFeatureTrifocal")
-    {
-        ParamsProcessorTrackerFeatureTrifocalPtr params = std::make_shared<ParamsProcessorTrackerFeatureTrifocal>();
-
-        YAML::Node vision_utils               = config      ["vision_utils"];
-        params->yaml_file_params_vision_utils = vision_utils["YAML file params"].as<std::string>();
-
-        // relative to global path for Vision Utils YAML
-        assert(params->yaml_file_params_vision_utils.at(0) != ('/') && "The parameter YAML FILE PARAMS (in processor params YAML file) must be specified with a path relative to the processor YAML file.");
-        unsigned first = _filename_dot_yaml.find("/");
-        unsigned last = _filename_dot_yaml.find_last_of("/");
-        std::string strNew = _filename_dot_yaml.substr (first,last-first);
-        params->yaml_file_params_vision_utils = _filename_dot_yaml.substr (first,last-first) + "/" + params->yaml_file_params_vision_utils;
-
-        YAML::Node algorithm                    = config   ["algorithm"];
-        params->time_tolerance                  = algorithm["time tolerance"]                 .as<double>();
-        params->voting_active                   = algorithm["voting active"]                  .as<bool>();
-        params->min_features_for_keyframe       = algorithm["minimum features for keyframe"]  .as<unsigned int>();
-        params->max_new_features                = algorithm["maximum new features"]           .as<int>();
-        params->n_cells_h                       = algorithm["grid horiz cells"]               .as<int>();
-        params->n_cells_v                       = algorithm["grid vert cells"]                .as<int>();
-        params->min_response_new_feature        = algorithm["min response new features"]      .as<int>();
-        params->min_track_length_for_factor     = algorithm["min track length for factor"].as<int>();
-        params->debug_view                      = algorithm["debug view"].as<bool>();
-
-        YAML::Node noise                      = config["noise"];
-        params->pixel_noise_std               = noise ["pixel noise std"].as<double>();
-
-        return params;
-    }
-    else
-    {
-        WOLF_ERROR("Wrong processor type! Should be \"TRACKER FEATURE TRIFOCAL\"");
-        return nullptr;
-    }
-    return nullptr;
-}
-
-// Register in the FactorySensor
-const bool WOLF_UNUSED registered_prc_trifocal = FactoryParamsProcessor::registerCreator("ProcessorTrackerFeatureTrifocal", createParamsProcessorTrackerFeatureTrifocal);
-
-} // namespace [unnamed]
-
-} // namespace wolf
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index fc45cf7431d4a54b73a93e0008f62dca79b6fc0a..ffd3390d04e0cceb6bb51e6eba877d932870b220 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -5,37 +5,34 @@ add_subdirectory(gtest)
 # Include gtest directory.
 include_directories(${GTEST_INCLUDE_DIRS})
 
-############# USE THIS TEST AS AN EXAMPLE #################
-#                                                         #
-# Create a specific test executable for gtest_example     #
-wolf_add_gtest(gtest_example gtest_example.cpp)           #
-target_link_libraries(gtest_example ${PLUGIN_NAME})      #
-#                                                         #
-###########################################################
-
-# Pinhole test
 wolf_add_gtest(gtest_pinhole gtest_pinhole.cpp)
 target_link_libraries(gtest_pinhole ${PLUGIN_NAME})
 
+wolf_add_gtest(gtest_capture_image gtest_capture_image.cpp)
+target_link_libraries(gtest_capture_image ${PLUGIN_NAME})
+
+wolf_add_gtest(gtest_feature_point_image gtest_feature_point_image.cpp)
+target_link_libraries(gtest_feature_point_image ${PLUGIN_NAME})
+
+
 wolf_add_gtest(gtest_sensor_camera gtest_sensor_camera.cpp)
 target_link_libraries(gtest_sensor_camera ${PLUGIN_NAME})
 
-# FactorTrifocal test
-wolf_add_gtest(gtest_factor_trifocal gtest_factor_trifocal.cpp)
-target_link_libraries(gtest_factor_trifocal ${PLUGIN_NAME})
+wolf_add_gtest(gtest_processor_visual_odometry gtest_processor_visual_odometry.cpp)
+target_link_libraries(gtest_processor_visual_odometry ${PLUGIN_NAME})
 
-# FactorFeatureEpipolar test
-wolf_add_gtest(gtest_factor_epipolar gtest_factor_epipolar.cpp)
-target_link_libraries(gtest_factor_epipolar ${PLUGIN_NAME})
+# # FactorTrifocal test
+# wolf_add_gtest(gtest_factor_trifocal gtest_factor_trifocal.cpp)
+# target_link_libraries(gtest_factor_trifocal ${PLUGIN_NAME})
 
-# ProcessorFeatureTrifocal test
-wolf_add_gtest(gtest_processor_tracker_feature_trifocal gtest_processor_tracker_feature_trifocal.cpp)
-target_link_libraries(gtest_processor_tracker_feature_trifocal ${PLUGIN_NAME})
+# # FactorFeatureEpipolar test
+# wolf_add_gtest(gtest_factor_epipolar gtest_factor_epipolar.cpp)
+# target_link_libraries(gtest_factor_epipolar ${PLUGIN_NAME})
 
-# ProcessorBundleAdjustment test
-wolf_add_gtest(gtest_processor_bundle_adjustment gtest_processor_bundle_adjustment.cpp)
-target_link_libraries(gtest_processor_bundle_adjustment ${PLUGIN_NAME})
+# # ProcessorBundleAdjustment test
+# wolf_add_gtest(gtest_processor_bundle_adjustment gtest_processor_bundle_adjustment.cpp)
+# target_link_libraries(gtest_processor_bundle_adjustment ${PLUGIN_NAME})
 
-# FactorPixelHp test
-wolf_add_gtest(gtest_factor_pixel_hp gtest_factor_pixel_hp.cpp)
-target_link_libraries(gtest_factor_pixel_hp ${PLUGIN_NAME})
+# # FactorPixelHp test -> depends on processor_bundle_adjustment.cpp
+# wolf_add_gtest(gtest_factor_pixel_hp gtest_factor_pixel_hp.cpp)
+# target_link_libraries(gtest_factor_pixel_hp ${PLUGIN_NAME})
diff --git a/test/camera_gazebo.yaml b/test/camera_gazebo.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e34d1a065a948f914fcba16565135e9971620bc8
--- /dev/null
+++ b/test/camera_gazebo.yaml
@@ -0,0 +1,22 @@
+width: 1280
+height: 960
+camera_name: gazebo
+camera_matrix:
+  rows: 3
+  cols: 3
+  data: [640, 0.000000, 640, 
+         0.000000, 640, 480, 
+         0.000000, 0.000000, 1.000000]
+distortion_model: none
+distortion_coefficients:
+  rows: 1
+  cols: 5
+  data: [0, 0, 0, 0, 0]
+rectification_matrix:
+  rows: 3
+  cols: 3
+  data: [1, 0, 0, 0, 1, 0, 0, 0, 1]
+projection_matrix:
+  rows: 3
+  cols: 4
+  data: [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0]
\ No newline at end of file
diff --git a/demos/demo_gazebo_x00cm_y-10cm.jpg b/test/demo_gazebo_x00cm_y-10cm.jpg
similarity index 100%
rename from demos/demo_gazebo_x00cm_y-10cm.jpg
rename to test/demo_gazebo_x00cm_y-10cm.jpg
diff --git a/demos/demo_gazebo_x00cm_y-20cm.jpg b/test/demo_gazebo_x00cm_y-20cm.jpg
similarity index 100%
rename from demos/demo_gazebo_x00cm_y-20cm.jpg
rename to test/demo_gazebo_x00cm_y-20cm.jpg
diff --git a/demos/demo_gazebo_x00cm_y00cm.jpg b/test/demo_gazebo_x00cm_y00cm.jpg
similarity index 100%
rename from demos/demo_gazebo_x00cm_y00cm.jpg
rename to test/demo_gazebo_x00cm_y00cm.jpg
diff --git a/test/gtest_capture_image.cpp b/test/gtest_capture_image.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..76fc8e6fe8429ac040749a8e83e5000a6474d913
--- /dev/null
+++ b/test/gtest_capture_image.cpp
@@ -0,0 +1,188 @@
+//--------LICENSE_START--------
+//
+// Copyright (C) 2020,2021,2022 Institut de Robòtica i Informàtica Industrial, CSIC-UPC.
+// Authors: Joan Solà Ortega (jsola@iri.upc.edu)
+// All rights reserved.
+//
+// This file is part of WOLF
+// WOLF is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with this program.  If not, see <http://www.gnu.org/licenses/>.
+//
+//--------LICENSE_END--------
+/**
+ * \file gtest_capture_image.cpp
+ *
+ *  Created on: March 31, 2022
+ *      \author: mfourmy
+ */
+
+#include <string>
+#include <core/utils/utils_gtest.h>
+#include <core/sensor/sensor_base.h>
+
+#include "vision/capture/capture_image.h"
+
+using namespace wolf;
+using namespace cv;
+
+class CaptureImage_test : public testing::Test
+{
+    public:
+        cv::Mat img_;
+
+        cv::KeyPoint cv_kp0_;
+        cv::KeyPoint cv_kp1_;
+        cv::KeyPoint cv_kp2_;
+
+        WKeyPoint wkp0_;
+        WKeyPoint wkp1_;
+        WKeyPoint wkp2_;
+
+        void SetUp() override
+        {
+            // make sure the counter starts from zero each time a new test is created;
+            // not really useful outside of this test
+            WKeyPoint::resetIdCount();
+            img_ = cv::Mat::eye(4, 4, CV_8UC1);
+
+            cv_kp0_ = cv::KeyPoint(0.0, 0.0, 0);
+            cv_kp1_ = cv::KeyPoint(1.0, 0.0, 0);
+            cv_kp2_ = cv::KeyPoint(2.0, 0.0, 0);
+            wkp0_ = WKeyPoint(cv_kp0_);
+            wkp1_ = WKeyPoint  (cv_kp1_);
+            wkp2_ = WKeyPoint  (cv_kp2_);
+        }
+};
+
+TEST_F(CaptureImage_test, WKeyPoint_class)
+{
+    // WKeyPoint ids start from 1 here because resetIdCount() is called in SetUp()
+    // before wkp0_/wkp1_/wkp2_ are (re)constructed from the cv::KeyPoints
+    ASSERT_EQ(wkp0_.getId(), 1);
+    ASSERT_EQ(wkp1_.getId(), 2);
+    ASSERT_EQ(wkp2_.getId(), 3);
+
+    ASSERT_EQ(wkp0_.getCvKeyPoint().pt.x, 0.0);
+    ASSERT_EQ(wkp1_.getCvKeyPoint().pt.x, 1.0);
+    ASSERT_EQ(wkp2_.getCvKeyPoint().pt.x, 2.0);
+
+    wkp0_.setCvKeyPoint(cv_kp1_);
+    ASSERT_EQ(wkp0_.getCvKeyPoint().pt.x, 1.0);
+
+    cv::Mat desc = 3*cv::Mat::eye(4, 4, CV_8UC1);
+    wkp0_.setDescriptor(desc);
+    ASSERT_EQ(wkp0_.getDescriptor().at<uchar>(0,0), 3);
+}
+
+
+TEST_F(CaptureImage_test, capture_image_type)
+{
+    CaptureImagePtr c = std::make_shared<CaptureImage>(0, nullptr, img_);
+
+    ASSERT_EQ(c->getType(), "CaptureImage");
+}
+
+TEST_F(CaptureImage_test, getter_setters_img)
+{
+    CaptureImagePtr c = std::make_shared<CaptureImage>(0, nullptr, img_);
+    cv::Mat temp = c->getImage();
+    ASSERT_EQ(temp.at<uchar>(0,0), 1);
+
+    // expected behavior: reassigning the external Mat does not change the image inside
+    // the capture, because cv::Mat "=" rebinds the local header to a new data array
+    temp = 3*cv::Mat::eye(4, 4, CV_8UC1);
+    ASSERT_EQ(temp.at<uchar>(0,0), 3);
+
+    cv::Mat temp2 = 6*cv::Mat::eye(4, 4, CV_8UC1);
+    c->setImage(temp2);
+    ASSERT_EQ(c->getImage().at<uchar>(0,0), 6);
+}
+
+
+TEST_F(CaptureImage_test, add_remove_key_points)
+{
+    CaptureImagePtr c = std::make_shared<CaptureImage>(0, nullptr, img_);
+    c->addKeyPoint(wkp0_);
+    c->addKeyPoint(wkp1_);
+    c->addKeyPoint(wkp2_);
+
+    ASSERT_EQ(c->getKeyPoints().size(), 3);
+
+    c->removeKeyPoint(wkp0_.getId());
+    ASSERT_EQ(c->getKeyPoints().size(), 2);
+
+    c->removeKeyPoint(wkp1_);
+    ASSERT_EQ(c->getKeyPoints().size(), 1);
+
+    // only wkp2 is left
+
+    ASSERT_EQ(c->getKeyPoints().at(3).getId(), 3);
+    ASSERT_EQ(c->getKeyPoints().at(3).getCvKeyPoint().pt.x, 2.0);
+
+    // create a new WKeyPoint and add it to the keypoint map;
+    // the new WKeyPoint ID is therefore 4, as well as its key in the map
+    c->addKeyPoint(cv_kp0_);  
+    ASSERT_EQ(c->getKeyPoints().at(4).getId(), 4);
+    ASSERT_EQ(c->getKeyPoints().at(4).getCvKeyPoint().pt.x, 0.0);
+
+
+}
+
+TEST_F(CaptureImage_test, add_remove_key_points_using_map)
+{
+    CaptureImagePtr c = std::make_shared<CaptureImage>(0, nullptr, img_);
+    // Now using a KeyPointsMap
+    KeyPointsMap mapwkps;
+    mapwkps.insert(std::pair<size_t, WKeyPoint>(wkp0_.getId(), wkp0_));
+    mapwkps.insert(std::pair<size_t, WKeyPoint>(wkp1_.getId(), wkp1_));
+    mapwkps.insert(std::pair<size_t, WKeyPoint>(wkp2_.getId(), wkp2_));
+
+    c->addKeyPoints(mapwkps);
+    ASSERT_EQ(c->getKeyPoints().size(), 3);
+    ASSERT_EQ(c->getKeyPoints().at(3).getId(), 3);
+}
+
+
+
+
+TEST_F(CaptureImage_test, add_remove_key_point_vectors)
+{
+    // Same as add_remove_key_points but with the vector argument
+    CaptureImagePtr c = std::make_shared<CaptureImage>(0, nullptr, img_);
+    std::vector<WKeyPoint> wkp_vec = {wkp0_, wkp1_, wkp2_};
+    c->addKeyPoints(wkp_vec);
+    ASSERT_EQ(c->getKeyPoints().size(), 3);
+
+    // adding the same wolf WKeyPoint vector is idempotent 
+    c->addKeyPoints(wkp_vec);
+    ASSERT_EQ(c->getKeyPoints().size(), 3);
+
+    // but adding new cv::KeyPoint will create new WKeyPoint
+    std::vector<WKeyPoint> cv_kp_vec = {cv_kp0_, cv_kp1_, cv_kp2_};
+    c->addKeyPoints(cv_kp_vec);
+    ASSERT_EQ(c->getKeyPoints().size(), 6);
+    // at key 5 is the new WKeyPoint created from cv_kp1_
+    ASSERT_EQ(c->getKeyPoints().at(5).getId(), 5);
+    ASSERT_EQ(c->getKeyPoints().at(5).getCvKeyPoint().pt.x, 1.0);
+}
+
+
+
+
+
+int main(int argc, char **argv)
+{
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
+
diff --git a/test/gtest_example.cpp b/test/gtest_example.cpp
deleted file mode 100644
index c73292612597de2fb22c677ffd5bedf92c62daa4..0000000000000000000000000000000000000000
--- a/test/gtest_example.cpp
+++ /dev/null
@@ -1,41 +0,0 @@
-//--------LICENSE_START--------
-//
-// Copyright (C) 2020,2021,2022 Institut de Robòtica i Informàtica Industrial, CSIC-UPC.
-// Authors: Joan Solà Ortega (jsola@iri.upc.edu)
-// All rights reserved.
-//
-// This file is part of WOLF
-// WOLF is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with this program.  If not, see <http://www.gnu.org/licenses/>.
-//
-//--------LICENSE_END--------
-#include <core/utils/utils_gtest.h>
-
-TEST(TestTest, DummyTestExample)
-{
-  EXPECT_FALSE(false);
-
-  ASSERT_TRUE(true);
-
-  int my_int = 5;
-
-  ASSERT_EQ(my_int, 5);
-
-//  PRINTF("All good at TestTest::DummyTestExample !\n");
-}
-
-int main(int argc, char **argv)
-{
-  testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/test/gtest_factor_trifocal.cpp b/test/gtest_factor_trifocal.cpp
deleted file mode 100644
index 730cb93d8c5d9597ead34972095441bea6bb33bd..0000000000000000000000000000000000000000
--- a/test/gtest_factor_trifocal.cpp
+++ /dev/null
@@ -1,992 +0,0 @@
-//--------LICENSE_START--------
-//
-// Copyright (C) 2020,2021,2022 Institut de Robòtica i Informàtica Industrial, CSIC-UPC.
-// Authors: Joan Solà Ortega (jsola@iri.upc.edu)
-// All rights reserved.
-//
-// This file is part of WOLF
-// WOLF is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with this program.  If not, see <http://www.gnu.org/licenses/>.
-//
-//--------LICENSE_END--------
-#include <core/utils/utils_gtest.h>
-
-#include "core/utils/logging.h"
-
-#include "core/ceres_wrapper/solver_ceres.h"
-#include "vision/processor/processor_tracker_feature_trifocal.h"
-#include "vision/capture/capture_image.h"
-#include "vision/factor/factor_trifocal.h"
-#include "vision/internal/config.h"
-
-using namespace Eigen;
-using namespace wolf;
-
-class FactorTrifocalTest : public testing::Test{
-    public:
-        Vector3d    pos1,   pos2,   pos3,   pos_cam, point;
-        Vector3d    euler1, euler2, euler3, euler_cam;
-        Quaterniond quat1,  quat2,  quat3,  quat_cam;
-        Vector4d    vquat1, vquat2, vquat3, vquat_cam; // quaternions as vectors
-        Vector7d    pose1,  pose2,  pose3,  pose_cam;
-
-        ProblemPtr      problem;
-        SolverCeresPtr  solver;
-
-        SensorCameraPtr camera;
-        ProcessorTrackerFeatureTrifocalPtr proc_trifocal;
-
-        SensorBasePtr   S;
-        FrameBasePtr    F1, F2, F3;
-        CaptureImagePtr I1, I2, I3;
-        FeatureBasePtr  f1, f2, f3;
-        FactorTrifocalPtr c123;
-
-        double pixel_noise_std;
-
-        ~FactorTrifocalTest() override
-        {
-            std::cout << "destructor\n";
-        }
-
-        void SetUp() override
-        {
-            std::string wolf_root = _WOLF_VISION_ROOT_DIR;
-
-            // configuration
-            /*
-             * We have three robot poses, in three frames, with cameras C1, C2, C3
-             * looking towards the origin of coordinates:
-             *
-             *              Z
-             *              |
-             *           ________  C3
-             *          /   |   /
-             *         ---------           /| C2
-             *              |             / |
-             *              |____________/_ | ___ Y
-             *             /             |  /
-             *            /              | /
-             *      --------- C1         |/
-             *      |   /   |
-             *      ---------
-             *        /
-             *       Y
-             *
-             * Each robot pose is at one axis, facing the origin:
-             *   F1: pos = (1,0,0), ori = (0,0,pi)
-             *   F2: pos = (0,1,0), ori = (0,0,-pi/2)
-             *   F3: pos = (0,0,1), ori = (0,pi/2,pi)
-             *
-             * The robot has a camera looking forward
-             *   S: pos = (0,0,0), ori = (-pi/1, 0, -pi/1)
-             *
-             * There is a point at the origin
-             *   P: pos = (0,0,0)
-             *
-             * The camera is canonical
-             *   K = Id.
-             *
-             * Therefore, P projects exactly at the origin on each camera,
-             * creating three features:
-             *   f1: p1 = (0,0)
-             *   f2: p2 = (0,0)
-             *   f3: p3 = (0,0)
-             *
-             * We form a Wolf tree with three frames, three captures,
-             * three features, and one trifocal factor:
-             *
-             *   Frame F1, Capture C1, feature f1
-             *   Frame F2, Capture C2, feature f2
-             *   Frame F3, Capture C3, feature f3, factor c123
-             *
-             * The three frame poses F1, F2, F3 and the camera pose S
-             * in the robot frame are variables subject to optimization
-             *
-             * We perform a number of tests based on this configuration.
-             */
-
-            // all frames look to the origin
-            pos1   << 1, 0, 0;
-            pos2   << 0, 1, 0;
-            pos3   << 0, 0, 1;
-            euler1 << 0, 0     ,  M_PI   ;
-            euler2 << 0, 0     , -M_PI_2 ;
-            euler3 << 0, M_PI_2,  M_PI   ;
-            quat1  =  e2q(euler1);
-            quat2  =  e2q(euler2);
-            quat3  =  e2q(euler3);
-            vquat1 =  quat1.coeffs();
-            vquat2 =  quat2.coeffs();
-            vquat3 =  quat3.coeffs();
-            pose1  << pos1, vquat1;
-            pose2  << pos2, vquat2;
-            pose3  << pos3, vquat3;
-
-            // camera at the robot origin looking forward
-            pos_cam   <<  0, 0, 0;
-            euler_cam << -M_PI_2, 0, -M_PI_2;// euler_cam *= M_TORAD;
-            quat_cam  =  e2q(euler_cam);
-            vquat_cam =  quat_cam.coeffs();
-            pose_cam  << pos_cam, vquat_cam;
-
-            // Build problem
-            problem = Problem::create("PO", 3);
-            solver = std::make_shared<SolverCeres>(problem);
-
-            // Install sensor and processor
-            S      = problem->installSensor("SensorCamera", "canonical", pose_cam, wolf_root + "/demos/camera_params_canonical.yaml");
-            camera = std::static_pointer_cast<SensorCamera>(S);
-
-            ParamsProcessorTrackerFeatureTrifocalPtr params_tracker_feature_trifocal_trifocal = std::make_shared<ParamsProcessorTrackerFeatureTrifocal>();
-            params_tracker_feature_trifocal_trifocal->time_tolerance                = 1.0/2;
-            params_tracker_feature_trifocal_trifocal->max_new_features              = 5;
-            params_tracker_feature_trifocal_trifocal->min_features_for_keyframe     = 5;
-            params_tracker_feature_trifocal_trifocal->yaml_file_params_vision_utils = wolf_root + "/demos/processor_tracker_feature_trifocal_vision_utils.yaml";
-            params_tracker_feature_trifocal_trifocal->debug_view                    = false;
-
-            ProcessorBasePtr proc = problem->installProcessor("ProcessorTrackerFeatureTrifocal", "trifocal", camera, params_tracker_feature_trifocal_trifocal);
-            proc_trifocal = std::static_pointer_cast<ProcessorTrackerFeatureTrifocal>(proc);
-
-            // Add three viewpoints with frame, capture and feature
-            pixel_noise_std = 2.0;
-            Vector2d pix(0,0);
-            Matrix2d pix_cov(Matrix2d::Identity() * pow(pixel_noise_std, 2));
-
-            F1 = problem->emplaceFrame(1.0, pose1);
-            I1 = std::static_pointer_cast<CaptureImage>(CaptureBase::emplace<CaptureImage>(F1, 1.0, camera, cv::Mat(2,2,CV_8UC1)));
-            f1 = FeatureBase::emplace<FeatureBase>(I1, "PIXEL", pix, pix_cov); // pixel at origin
-
-            F2 = problem->emplaceFrame(2.0, pose2);
-            I2 = std::static_pointer_cast<CaptureImage>((CaptureBase::emplace<CaptureImage>(F2, 2.0, camera, cv::Mat(2,2,CV_8UC1))));
-            f2 = FeatureBase::emplace<FeatureBase>(I2, "PIXEL", pix, pix_cov); // pixel at origin
-
-            F3 = problem->emplaceFrame(3.0, pose3);
-            I3 = std::static_pointer_cast<CaptureImage>(CaptureBase::emplace<CaptureImage>(F3, 3.0, camera, cv::Mat(2,2,CV_8UC1)));
-            f3 = FeatureBase::emplace<FeatureBase>(I3, "PIXEL", pix, pix_cov); // pixel at origin
-
-            // trifocal factor
-            // c123 = std::make_shared<FactorTrifocal>(f1, f2, f3, proc_trifocal, false, FAC_ACTIVE);
-            c123 = std::static_pointer_cast<FactorTrifocal>(FactorBase::emplace<FactorTrifocal>(f3, f1, f2, f3, proc_trifocal, false, FAC_ACTIVE));
-            // f3   ->addFactor   (c123);
-            // f1   ->addConstrainedBy(c123);
-            // f2   ->addConstrainedBy(c123);
-        }
-};
-
-TEST_F(FactorTrifocalTest, InfoMatrix)
-{
-    /** Ground truth covariance. Rationale:
-     * Due to the orthogonal configuration (see line 40 and onwards), we have:
-     *   Let s = pixel_noise_std.
-     *   Let d = 1 the distance from the cameras to the 3d point
-     *   Let k be a proportionality constant related to the projection and pixellization process
-     *   Let S = k*d*s
-     *   The pixel on camera 1 retroprojects a conic PDF with width S = k*s*d
-     *   The line on camera 2 retroprojects a plane aperture of S = k*s*d
-     *   The product (ie intersection) of cone and plane aperture PDFs is a sphere of radius S
-     *   Projection of the sphere to camera 3 is a circle of S/k/d=s pixels
-     *   This is the projected covariance: s^2 pixel^2
-     *   The measurement has a std dev of s pixel --> cov is s^2 pixel^2
-     *   The total cov is s^2 pix^2 + s^2 pix^2 = 2s^2 pix^2
-     *   The info matrix is 0.5 s^-2 pix^-2
-     *   The sqrt info matrix is 1/s/sqrt(2) pix^-1
-     */
-    Matrix3d sqrt_info_gt = Matrix3d::Identity() / pixel_noise_std / sqrt(2.0);
-
-    ASSERT_MATRIX_APPROX(c123->getSqrtInformationUpper(), sqrt_info_gt, 1e-6);
-
-}
-
-TEST_F(FactorTrifocalTest, expectation)
-{
-    //    Homogeneous transform C2 wrt C1
-    Matrix4d _c1Hc2; _c1Hc2 <<
-               0,  0, -1,  1,
-               0,  1,  0,  0,
-               1,  0,  0,  1,
-               0,  0,  0,  1;
-
-    // rotation and translation
-    Matrix3d _c1Rc2 = _c1Hc2.block(0,0,3,3);
-    Vector3d _c1Tc2 = _c1Hc2.block(0,3,3,1);
-
-    // Essential matrix, ground truth (fwd and bkwd)
-    Matrix3d _c1Ec2 = wolf::skew(_c1Tc2) * _c1Rc2;
-    Matrix3d _c2Ec1 = _c1Ec2.transpose();
-
-    // Expected values
-    vision_utils::TrifocalTensor tensor;
-    Matrix3d c2Ec1;
-    c123->expectation(pos1, quat1, pos2, quat2, pos3, quat3, pos_cam, quat_cam, tensor, c2Ec1);
-
-    // check trilinearities
-
-    // Elements computed using the tensor
-    Matrix3d T0, T1, T2;
-    tensor.getLayers(T0,T1,T2);
-    Vector3d _m1 (0,0,1),
-             _m2 (0,0,1),
-             _m3 (0,0,1); // ground truth
-    Matrix3d m1Tt = _m1(0)*T0.transpose()
-                  + _m1(1)*T1.transpose()
-                  + _m1(2)*T2.transpose();
-
-    // Projective line: l = (nx ny dn), with (nx ny): normal vector; dn: distance to origin times norm(nx,ny)
-    Vector3d _l2(0,1,0),
-             _p2(1,0,0),
-             _p3(0,1,0); // ground truth
-    Vector3d l2;
-    l2 = c2Ec1 * _m1;
-
-    // check epipolar lines (check only director vectors for equal direction)
-    ASSERT_MATRIX_APPROX(l2/l2(1), _l2/_l2(1), 1e-6);
-
-    // check perpendicular lines (check only director vectors for orthogonal direction)
-    ASSERT_NEAR(l2(0)*_p2(0) + l2(1)*_p2(1), 0, 1e-6);
-
-    // Verify trilinearities
-
-    // Point-line-line
-    Matrix1d pll = _p3.transpose() * m1Tt * _p2;
-    ASSERT_TRUE(pll(0)<1e-5);
-
-    // Point-line-point
-    Vector3d plp = wolf::skew(_m3) * m1Tt * _p2;
-    ASSERT_MATRIX_APPROX(plp, Vector3d::Zero(), 1e-6);
-
-    // Point-point-line
-    Vector3d ppl = _p3.transpose() * m1Tt * wolf::skew(_m2);
-    ASSERT_MATRIX_APPROX(ppl, Vector3d::Zero(), 1e-6);
-
-    // Point-point-point
-    Matrix3d ppp = wolf::skew(_m3) * m1Tt * wolf::skew(_m2);
-    ASSERT_MATRIX_APPROX(ppp, Matrix3d::Zero(), 1e-6);
-
-    // check epipolars
-    ASSERT_MATRIX_APPROX(c2Ec1/c2Ec1(0,1), _c2Ec1/_c2Ec1(0,1), 1e-6);
-}
-
-TEST_F(FactorTrifocalTest, residual)
-{
-    vision_utils::TrifocalTensor tensor;
-    Matrix3d c2Ec1;
-    Vector3d residual;
-
-    // Nominal values
-    c123->expectation(pos1, quat1, pos2, quat2, pos3, quat3, pos_cam, quat_cam, tensor, c2Ec1);
-    residual = c123->residual(tensor, c2Ec1);
-
-    ASSERT_MATRIX_APPROX(residual, Vector3d::Zero(), 1e-6);
-}
-
-TEST_F(FactorTrifocalTest, error_jacobians)
-{
-    vision_utils::TrifocalTensor tensor;
-    Matrix3d c2Ec1;
-    Vector3d residual, residual_pert;
-    Vector3d pix0, pert, pix_pert;
-    double epsilon = 1e-8;
-
-    // Nominal values
-    c123->expectation(pos1, quat1, pos2, quat2, pos3, quat3, pos_cam, quat_cam, tensor, c2Ec1);
-    residual = c123->residual(tensor, c2Ec1);
-
-    Matrix<double, 3, 3> J_e_m1, J_e_m2, J_e_m3, J_r_m1, J_r_m2, J_r_m3;
-    c123->error_jacobians(tensor, c2Ec1, J_e_m1, J_e_m2, J_e_m3);
-    J_r_m1 = c123->getSqrtInformationUpper() * J_e_m1;
-    J_r_m2 = c123->getSqrtInformationUpper() * J_e_m2;
-    J_r_m3 = c123->getSqrtInformationUpper() * J_e_m3;
-
-    // numerical jacs
-    Matrix<double,3,3> Jn_r_m1, Jn_r_m2, Jn_r_m3;
-
-    // jacs wrt m1
-    pix0 = c123->getPixelCanonical1();
-    for (int i=0; i<3; i++)
-    {
-        pert.setZero();
-        pert(i)  = epsilon;
-        pix_pert = pix0 + pert;
-        c123->setPixelCanonical1(pix_pert); // m1
-        residual_pert = c123->residual(tensor, c2Ec1);
-
-        Jn_r_m1.col(i) = (residual_pert - residual) / epsilon;
-    }
-    c123->setPixelCanonical1(pix0);
-
-    ASSERT_MATRIX_APPROX(J_r_m1, Jn_r_m1, 1e-6);
-
-    // jacs wrt m2
-    pix0 = c123->getPixelCanonical2();
-    for (int i=0; i<3; i++)
-    {
-        pert.setZero();
-        pert(i)  = epsilon;
-        pix_pert = pix0 + pert;
-        c123->setPixelCanonical2(pix_pert); // m2
-        residual_pert = c123->residual(tensor, c2Ec1);
-
-        Jn_r_m2.col(i) = (residual_pert - residual) / epsilon;
-    }
-    c123->setPixelCanonical2(pix0);
-
-    ASSERT_MATRIX_APPROX(J_r_m2, Jn_r_m2, 1e-6);
-
-    // jacs wrt m3
-    pix0 = c123->getPixelCanonical3();
-    for (int i=0; i<3; i++)
-    {
-        pert.setZero();
-        pert(i)  = epsilon;
-        pix_pert = pix0 + pert;
-        c123->setPixelCanonical3(pix_pert); // m3
-        residual_pert = c123->residual(tensor, c2Ec1);
-
-        Jn_r_m3.col(i) = (residual_pert - residual) / epsilon;
-    }
-    c123->setPixelCanonical3(pix0);
-
-    ASSERT_MATRIX_APPROX(J_r_m3, Jn_r_m3, 1e-6);
-
-}
-
-TEST_F(FactorTrifocalTest, operator_parenthesis)
-{
-    Vector3d res;
-
-    // Factor with exact states should give zero residual
-    c123->operator ()(pos1.data(), vquat1.data(),
-                      pos2.data(), vquat2.data(),
-                      pos3.data(), vquat3.data(),
-                      pos_cam.data(), vquat_cam.data(),
-                      res.data());
-
-    ASSERT_MATRIX_APPROX(res, Vector3d::Zero(), 1e-6);
-}
-
-TEST_F(FactorTrifocalTest, solve_F1)
-{
-    F1->setState("PO", { pose1.head(3), pose1.tail(4) } );
-    F2->setState("PO", { pose2.head(3), pose2.tail(4) } );
-    F3->setState("PO", { pose3.head(3), pose3.tail(4) } );
-    S ->getP()->setState(pos_cam);
-    S ->getO()->setState(vquat_cam);
-    // Residual with prior
-
-    Vector3d res;
-
-    Eigen::VectorXd F1_p = F1->getP()->getState();
-    Eigen::VectorXd F1_o = F1->getO()->getState();
-    Eigen::VectorXd F2_p = F2->getP()->getState();
-    Eigen::VectorXd F2_o = F2->getO()->getState();
-    Eigen::VectorXd F3_p = F3->getP()->getState();
-    Eigen::VectorXd F3_o = F3->getO()->getState();
-    Eigen::VectorXd S_p  = S ->getP()->getState();
-    Eigen::VectorXd S_o  = S ->getO()->getState();
-
-    c123->operator ()(F1_p.data(), F1_o.data(),
-                      F2_p.data(), F2_o.data(),
-                      F3_p.data(), F3_o.data(),
-                      S_p. data(), S_o. data(),
-                      res.data());
-
-    WOLF_DEBUG("Initial state:              ", F1->getState().transpose());
-    WOLF_DEBUG("residual before perturbing: ", res.transpose());
-    ASSERT_MATRIX_APPROX(res, Vector3d::Zero(), 1e-6);
-
-    // Residual with perturbated state
-    F1->perturb(0.1);
-
-    F1_p = F1->getP()->getState();
-    F1_o = F1->getO()->getState();
-
-    c123->operator ()(F1_p.data(), F1_o.data(),
-                      F2_p.data(), F2_o.data(),
-                      F3_p.data(), F3_o.data(),
-                      S_p. data(), S_o. data(),
-                      res.data());
-
-    WOLF_DEBUG("perturbed state:            ", F1->getState().vector("PO").transpose());
-    WOLF_DEBUG("residual before solve:      ", res.transpose());
-
-    // Residual with solved state
-
-    S ->fixExtrinsics();
-    F1->unfix();
-    F2->fix();
-    F3->fix();
-
-    std::string report = solver->solve(SolverManager::ReportVerbosity::BRIEF);
-
-    F1_p = F1->getP()->getState();
-    F1_o = F1->getO()->getState();
-    F2_p = F2->getP()->getState();
-    F2_o = F2->getO()->getState();
-    F3_p = F3->getP()->getState();
-    F3_o = F3->getO()->getState();
-    S_p  = S ->getP()->getState();
-    S_o  = S ->getO()->getState();
-
-    c123->operator ()(F1_p.data(), F1_o.data(),
-                      F2_p.data(), F2_o.data(),
-                      F3_p.data(), F3_o.data(),
-                      S_p. data(), S_o. data(),
-                      res.data());
-
-    WOLF_DEBUG("solved state:               ", F1->getState().vector("PO").transpose());
-    WOLF_DEBUG("residual after solve:       ", res.transpose());
-
-    WOLF_DEBUG(report, " AND UNION");
-
-    ASSERT_MATRIX_APPROX(res, Vector3d::Zero(), 1e-6);
-
-}
-
-TEST_F(FactorTrifocalTest, solve_F2)
-{
-    F1->setState("PO", { pose1.head(3), pose1.tail(4) } );
-    F2->setState("PO", { pose2.head(3), pose2.tail(4) } );
-    F3->setState("PO", { pose3.head(3), pose3.tail(4) } );
-    S ->getP()->setState(pos_cam);
-    S ->getO()->setState(vquat_cam);
-
-    // Residual with prior
-
-    Vector3d res;
-
-    Eigen::VectorXd F1_p = F1->getP()->getState();
-    Eigen::VectorXd F1_o = F1->getO()->getState();
-    Eigen::VectorXd F2_p = F2->getP()->getState();
-    Eigen::VectorXd F2_o = F2->getO()->getState();
-    Eigen::VectorXd F3_p = F3->getP()->getState();
-    Eigen::VectorXd F3_o = F3->getO()->getState();
-    Eigen::VectorXd S_p  = S ->getP()->getState();
-    Eigen::VectorXd S_o  = S ->getO()->getState();
-
-    c123->operator ()(F1_p.data(), F1_o.data(),
-                      F2_p.data(), F2_o.data(),
-                      F3_p.data(), F3_o.data(),
-                      S_p. data(), S_o. data(),
-                      res.data());
-
-    WOLF_DEBUG("Initial state:              ", F2->getState().vector("PO").transpose());
-    WOLF_DEBUG("residual before perturbing: ", res.transpose());
-    ASSERT_MATRIX_APPROX(res, Vector3d::Zero(), 1e-6);
-
-    // Residual with perturbated state
-
-    F2->perturb(0.1);
-
-    F2_p = F2->getP()->getState();
-    F2_o = F2->getO()->getState();
-
-    c123->operator ()(F1_p.data(), F1_o.data(),
-                      F2_p.data(), F2_o.data(),
-                      F3_p.data(), F3_o.data(),
-                      S_p. data(), S_o. data(),
-                      res.data());
-
-    WOLF_DEBUG("perturbed state:            ", F2->getState().vector("PO").transpose());
-    WOLF_DEBUG("residual before solve:      ", res.transpose());
-
-    // Residual with solved state
-
-    S ->fixExtrinsics();
-    F1->fix();
-    F2->unfix();
-    F3->fix();
-
-    std::string report = solver->solve(SolverManager::ReportVerbosity::BRIEF);
-
-    F1_p = F1->getP()->getState();
-    F1_o = F1->getO()->getState();
-    F2_p = F2->getP()->getState();
-    F2_o = F2->getO()->getState();
-    F3_p = F3->getP()->getState();
-    F3_o = F3->getO()->getState();
-    S_p  = S ->getP()->getState();
-    S_o  = S ->getO()->getState();
-
-    c123->operator ()(F1_p.data(), F1_o.data(),
-                      F2_p.data(), F2_o.data(),
-                      F3_p.data(), F3_o.data(),
-                      S_p. data(), S_o. data(),
-                      res.data());
-
-    WOLF_DEBUG("solved state:               ", F2->getState().vector("PO").transpose());
-    WOLF_DEBUG("residual after solve:       ", res.transpose());
-
-    WOLF_DEBUG(report, " AND UNION");
-
-    ASSERT_MATRIX_APPROX(res, Vector3d::Zero(), 1e-6);
-
-}
-
-TEST_F(FactorTrifocalTest, solve_F3)
-{
-    F1->setState("PO", { pose1.head(3), pose1.tail(4) } );
-    F2->setState("PO", { pose2.head(3), pose2.tail(4) } );
-    F3->setState("PO", { pose3.head(3), pose3.tail(4) } );
-    S ->getP()->setState(pos_cam);
-    S ->getO()->setState(vquat_cam);
-
-    // Residual with prior
-
-    Vector3d res;
-
-    Eigen::VectorXd F1_p = F1->getP()->getState();
-    Eigen::VectorXd F1_o = F1->getO()->getState();
-    Eigen::VectorXd F2_p = F2->getP()->getState();
-    Eigen::VectorXd F2_o = F2->getO()->getState();
-    Eigen::VectorXd F3_p = F3->getP()->getState();
-    Eigen::VectorXd F3_o = F3->getO()->getState();
-    Eigen::VectorXd S_p  = S ->getP()->getState();
-    Eigen::VectorXd S_o  = S ->getO()->getState();
-
-    c123->operator ()(F1_p.data(), F1_o.data(),
-                      F2_p.data(), F2_o.data(),
-                      F3_p.data(), F3_o.data(),
-                      S_p. data(), S_o. data(),
-                      res.data());
-
-    WOLF_DEBUG("Initial state:              ", F3->getState().vector("PO").transpose());
-    WOLF_DEBUG("residual before perturbing: ", res.transpose());
-    ASSERT_MATRIX_APPROX(res, Vector3d::Zero(), 1e-6);
-
-    // Residual with perturbated state
-
-    F3->perturb(0.1);
-
-    F3_p = F3->getP()->getState();
-    F3_o = F3->getO()->getState();
-
-    c123->operator ()(F1_p.data(), F1_o.data(),
-                      F2_p.data(), F2_o.data(),
-                      F3_p.data(), F3_o.data(),
-                      S_p. data(), S_o. data(),
-                      res.data());
-
-    WOLF_DEBUG("perturbed state:            ", F3->getState().vector("PO").transpose());
-    WOLF_DEBUG("residual before solve:      ", res.transpose());
-    ASSERT_NEAR(res(2), 0, 1e-6); // Epipolar c2-c1 should be respected when perturbing F3
-
-    // Residual with solved state
-
-    S ->fixExtrinsics();
-    F1->fix();
-    F2->fix();
-    F3->unfix();
-
-    std::string report = solver->solve(SolverManager::ReportVerbosity::BRIEF);
-
-    F1_p = F1->getP()->getState();
-    F1_o = F1->getO()->getState();
-    F2_p = F2->getP()->getState();
-    F2_o = F2->getO()->getState();
-    F3_p = F3->getP()->getState();
-    F3_o = F3->getO()->getState();
-    S_p  = S ->getP()->getState();
-    S_o  = S ->getO()->getState();
-
-    c123->operator ()(F1_p.data(), F1_o.data(),
-                      F2_p.data(), F2_o.data(),
-                      F3_p.data(), F3_o.data(),
-                      S_p. data(), S_o. data(),
-                      res.data());
-
-    WOLF_DEBUG("solved state:               ", F3->getState().vector("PO").transpose());
-    WOLF_DEBUG("residual after solve:       ", res.transpose());
-
-    WOLF_DEBUG(report, " AND UNION");
-
-    ASSERT_MATRIX_APPROX(res, Vector3d::Zero(), 1e-6);
-
-}
-
-TEST_F(FactorTrifocalTest, solve_S)
-{
-    F1->setState(pose1, "PO", {3,4});
-    F2->setState(pose2, "PO", {3,4});
-    F3->setState(pose3, "PO", {3,4});
-    S ->getP()->setState(pos_cam);
-    S ->getO()->setState(vquat_cam);
-
-    // Residual with prior
-
-    Vector3d res;
-
-    Eigen::VectorXd F1_p = F1->getP()->getState();
-    Eigen::VectorXd F1_o = F1->getO()->getState();
-    Eigen::VectorXd F2_p = F2->getP()->getState();
-    Eigen::VectorXd F2_o = F2->getO()->getState();
-    Eigen::VectorXd F3_p = F3->getP()->getState();
-    Eigen::VectorXd F3_o = F3->getO()->getState();
-    Eigen::VectorXd S_p  = S ->getP()->getState();
-    Eigen::VectorXd S_o  = S ->getO()->getState();
-
-    c123->operator ()(F1_p.data(), F1_o.data(),
-                      F2_p.data(), F2_o.data(),
-                      F3_p.data(), F3_o.data(),
-                      S_p. data(), S_o. data(),
-                      res.data());
-
-    WOLF_DEBUG("Initial state:              ", S->getP()->getState().transpose(), " ", S->getO()->getState().transpose());
-    WOLF_DEBUG("residual before perturbing: ", res.transpose());
-    ASSERT_MATRIX_APPROX(res, Vector3d::Zero(), 1e-6);
-
-    // Residual with perturbated state
-
-    S->perturb(0.1);
-
-    S_p = S->getP()->getState();
-    S_o = S->getO()->getState();
-
-    c123->operator ()(F1_p.data(), F1_o.data(),
-                      F2_p.data(), F2_o.data(),
-                      F3_p.data(), F3_o.data(),
-                      S_p. data(), S_o. data(),
-                      res.data());
-
-    WOLF_DEBUG("perturbed state:            ", S->getState().vector("PO").transpose());
-    WOLF_DEBUG("residual before solve:      ", res.transpose());
-
-    // Residual with solved state
-
-    S ->unfixExtrinsics();
-    F1->fix();
-    F2->fix();
-    F3->fix();
-
-    std::string report = solver->solve(SolverManager::ReportVerbosity::BRIEF);
-
-    F1_p = F1->getP()->getState();
-    F1_o = F1->getO()->getState();
-    F2_p = F2->getP()->getState();
-    F2_o = F2->getO()->getState();
-    F3_p = F3->getP()->getState();
-    F3_o = F3->getO()->getState();
-    S_p  = S ->getP()->getState();
-    S_o  = S ->getO()->getState();
-
-    c123->operator ()(F1_p.data(), F1_o.data(),
-                      F2_p.data(), F2_o.data(),
-                      F3_p.data(), F3_o.data(),
-                      S_p. data(), S_o. data(),
-                      res.data());
-
-    WOLF_DEBUG("solved state:               ", S->getP()->getState().transpose(), " ", S->getO()->getState().transpose());
-    WOLF_DEBUG("residual after solve:       ", res.transpose());
-
-    WOLF_DEBUG(report, " AND UNION");
-
-    ASSERT_MATRIX_APPROX(res, Vector3d::Zero(), 1e-6);
-
-}
-
-class FactorTrifocalMultiPointTest : public FactorTrifocalTest
-{
-        /*
-         * In this test class we add 8 more points and perform optimization on the camera frames.
-         *
-         * C1 is not optimized as it acts as the reference
-         * S  is not optimized either as observability is compromised
-         * C2.pos is fixed to set the unobservable scale
-         * C2.ori and all C3 are optimized
-         *
-         */
-
-    public:
-        std::vector<FeatureBasePtr> fv1, fv2, fv3;
-        std::vector<FactorTrifocalPtr> cv123;
-
-        void SetUp() override
-        {
-            FactorTrifocalTest::SetUp();
-
-            Matrix<double, 2, 9> c1p_can;
-            c1p_can <<
-                    0,   -1/3.00,   -1/3.00,    1/3.00,    1/3.00,   -1.0000,   -1.0000, 1.0000,    1.0000,
-                    0,    1/3.00,   -1/3.00,    1/3.00,   -1/3.00,    1.0000,   -1.0000, 1.0000,   -1.0000;
-
-            Matrix<double, 2, 9> c2p_can;
-            c2p_can <<
-                    0,    1/3.00,    1/3.00,    1.0000,    1.0000,   -1/3.00,   -1/3.00,   -1.0000,   -1.0000,
-                    0,    1/3.00,   -1/3.00,    1.0000,   -1.0000,    1/3.00,   -1/3.00,    1.0000,   -1.0000;
-
-            Matrix<double, 2, 9> c3p_can;
-            c3p_can <<
-                    0,   -1/3.00,   -1.0000,    1/3.00,    1.0000,   -1/3.00,   -1.0000,   1/3.00,    1.0000,
-                    0,   -1/3.00,   -1.0000,   -1/3.00,   -1.0000,    1/3.00,    1.0000,   1/3.00,    1.0000;
-
-            Matrix2d pix_cov; pix_cov.setIdentity(); //pix_cov *= 1e-4;
-
-            // for i==0 we already have them
-            fv1.push_back(f1);
-            fv2.push_back(f2);
-            fv3.push_back(f3);
-            cv123.push_back(c123);
-            for (size_t i=1; i<c1p_can.cols() ; i++)
-            {
-                // fv1.push_back(std::make_shared<FeatureBase>("PIXEL", c1p_can.col(i), pix_cov));
-                auto f  = FeatureBase::emplace<FeatureBase>(I1, "PIXEL", c1p_can.col(i), pix_cov);
-                fv1.push_back(f);
-                // I1->addFeature(fv1.at(i));
-
-                // fv2.push_back(std::make_shared<FeatureBase>("PIXEL", c2p_can.col(i), pix_cov));
-                auto f2  = FeatureBase::emplace<FeatureBase>(I2, "PIXEL", c2p_can.col(i), pix_cov);
-                fv2.push_back(f2);
-                // I2->addFeature(fv2.at(i));
-
-                // fv3.push_back(std::make_shared<FeatureBase>("PIXEL", c3p_can.col(i), pix_cov));
-                auto f3  = FeatureBase::emplace<FeatureBase>(I3, "PIXEL", c3p_can.col(i), pix_cov);
-                fv3.push_back(f3);
-                // I3->addFeature(fv3.at(i));
-
-                auto ff = std::static_pointer_cast<FactorTrifocal>(FactorBase::emplace<FactorTrifocal>(fv3.at(i), fv1.at(i), fv2.at(i), fv3.at(i), proc_trifocal, false, FAC_ACTIVE));
-                cv123.push_back(ff);
-                // fv3.at(i)->addFactor(cv123.at(i));
-                // fv1.at(i)->addConstrainedBy(cv123.at(i));
-                // fv2.at(i)->addConstrainedBy(cv123.at(i));
-            }
-
-        }
-
-};
-
-TEST_F(FactorTrifocalMultiPointTest, solve_multi_point)
-{
-    /*
-     * In this test we add 8 more points and perform optimization on the camera frames.
-     *
-     * C1 is not optimized as it acts as the reference
-     * S  is not optimized either as observability is compromised
-     * C2.pos is fixed to set the unobservable scale
-     * C2.ori and all C3 are optimized
-     *
-     */
-
-    S ->getP()->fix(); // do not calibrate sensor pos
-    S ->getO()->fix(); // do not calibrate sensor ori
-    F1->getP()->fix(); // Cam 1 acts as reference
-    F1->getO()->fix(); // Cam 1 acts as reference
-    F2->getP()->fix(); // This fixes the scale
-    F2->getO()->unfix(); // Estimate Cam 2 ori
-    F3->getP()->unfix(); // Estimate Cam 3 pos
-    F3->getO()->unfix(); // Estimate Cam 3 ori
-
-    // Perturbate states, keep scale
-    problem->perturb(0.1);
-
-    std::string report = solver->solve(SolverManager::ReportVerbosity::FULL);
-
-    // Print results
-    WOLF_INFO("report: ", report);
-    problem->print(1,0,1,0);
-
-    // Evaluate final states
-    ASSERT_MATRIX_APPROX(F2->getP()->getState(), pos2  , 1e-6);
-    ASSERT_MATRIX_APPROX(F2->getO()->getState(), vquat2, 1e-6);
-    ASSERT_MATRIX_APPROX(F3->getP()->getState(), pos3  , 1e-6);
-    ASSERT_MATRIX_APPROX(F3->getO()->getState(), vquat3, 1e-6);
-
-    Eigen::VectorXd F1_p = F1->getP()->getState();
-    Eigen::VectorXd F1_o = F1->getO()->getState();
-    Eigen::VectorXd F2_p = F2->getP()->getState();
-    Eigen::VectorXd F2_o = F2->getO()->getState();
-    Eigen::VectorXd F3_p = F3->getP()->getState();
-    Eigen::VectorXd F3_o = F3->getO()->getState();
-    Eigen::VectorXd S_p  = S ->getP()->getState();
-    Eigen::VectorXd S_o  = S ->getO()->getState();
-
-    // evaluate residuals
-    Vector3d res;
-    for (size_t i=0; i<cv123.size(); i++)
-    {
-        cv123.at(i)->operator ()(F1_p.data(), F1_o.data(),
-                                 F2_p.data(), F2_o.data(),
-                                 F3_p.data(), F3_o.data(),
-                                 S_p. data(), S_o. data(),
-                                 res.data());
-
-        ASSERT_MATRIX_APPROX(res, Vector3d::Zero(), 1e-6);
-    }
-
-}
-
-TEST_F(FactorTrifocalMultiPointTest, solve_multi_point_scale)
-{
-    /*
-     * In this test we add 8 more points and perform optimization on the camera frames.
-     *
-     * C1 is not optimized as it acts as the reference
-     * S  is not optimized either as observability is compromised
-     * C2.pos is fixed to set the unobservable scale
-     * C2.ori and all C3 are optimized
-     *
-     */
-
-    S ->getP()->fix(); // do not calibrate sensor pos
-    S ->getO()->fix(); // do not calibrate sensor ori
-    F1->getP()->fix(); // Cam 1 acts as reference
-    F1->getO()->fix(); // Cam 1 acts as reference
-    F2->getP()->fix(); // This fixes the scale
-    F2->getO()->unfix(); // Estimate Cam 2 ori
-    F3->getP()->unfix(); // Estimate Cam 3 pos
-    F3->getO()->unfix(); // Estimate Cam 3 ori
-
-    // Perturbate states, change scale
-//    problem->perturb(0.1);
-    F1->getP()->setState( 2 * pos1 );
-    F1->getO()->setState(   vquat1 );
-    F2->getP()->setState( 2 * pos2 );
-    F2->getO()->setState((  vquat2 + 0.1*Vector4d::Random()).normalized());
-    F3->getP()->setState( 2 * pos3 + 0.1*Vector3d::Random());
-    F3->getO()->setState((  vquat3 + 0.1*Vector4d::Random()).normalized());
-
-    std::string report = solver->solve(SolverManager::ReportVerbosity::BRIEF);
-
-    // Print results
-    WOLF_DEBUG("report: ", report);
-    problem->print(1,0,1,0);
-
-    // Evaluate final states
-    ASSERT_MATRIX_APPROX(F2->getP()->getState(), 2 * pos2, 1e-6);
-    ASSERT_MATRIX_APPROX(F2->getO()->getState(),   vquat2, 1e-6);
-    ASSERT_MATRIX_APPROX(F3->getP()->getState(), 2 * pos3, 1e-6);
-    ASSERT_MATRIX_APPROX(F3->getO()->getState(),   vquat3, 1e-6);
-
-    Eigen::VectorXd F1_p = F1->getP()->getState();
-    Eigen::VectorXd F1_o = F1->getO()->getState();
-    Eigen::VectorXd F2_p = F2->getP()->getState();
-    Eigen::VectorXd F2_o = F2->getO()->getState();
-    Eigen::VectorXd F3_p = F3->getP()->getState();
-    Eigen::VectorXd F3_o = F3->getO()->getState();
-    Eigen::VectorXd S_p  = S ->getP()->getState();
-    Eigen::VectorXd S_o  = S ->getO()->getState();
-
-    // evaluate residuals
-    Vector3d res;
-    for (size_t i=0; i<cv123.size(); i++)
-    {
-        cv123.at(i)->operator ()(F1_p.data(), F1_o.data(),
-                                 F2_p.data(), F2_o.data(),
-                                 F3_p.data(), F3_o.data(),
-                                 S_p. data(), S_o. data(),
-                                 res.data());
-
-        ASSERT_MATRIX_APPROX(res, Vector3d::Zero(), 1e-6);
-    }
-}
-
-#include <core/factor/factor_distance_3d.h>
-
-TEST_F(FactorTrifocalMultiPointTest, solve_multi_point_distance)
-{
-    /*
-     * In this test we add 8 more points and perform optimization on the camera frames.
-     *
-     * C1 is not optimized as it acts as the reference
-     * S  is not optimized either as observability is compromised
-     * C2 and C3 are optimized
-     * The scale is observed through a distance factor
-     *
-     */
-
-    S ->getP()->fix(); // do not calibrate sensor pos
-    S ->getO()->fix(); // do not calibrate sensor ori
-    F1->getP()->fix(); // Cam 1 acts as reference
-    F1->getO()->fix(); // Cam 1 acts as reference
-    F2->getP()->unfix(); // Estimate Cam 2 pos
-    F2->getO()->unfix(); // Estimate Cam 2 ori
-    F3->getP()->unfix(); // Estimate Cam 3 pos
-    F3->getO()->unfix(); // Estimate Cam 3 ori
-
-    // Perturbate states, change scale
-//    problem->perturb(0.1);
-    F1->getP()->setState( pos1   );
-    F1->getO()->setState( vquat1 );
-    F2->getP()->setState( pos2   + 0.2*Vector3d::Random() );
-    F2->getO()->setState((vquat2 + 0.2*Vector4d::Random()).normalized());
-    F3->getP()->setState( pos3   + 0.2*Vector3d::Random());
-    F3->getO()->setState((vquat3 + 0.2*Vector4d::Random()).normalized());
-
-    // Add a distance factor to fix the scale
-    double distance     = sqrt(2.0);
-    double distance_std = 0.1;
-
-    auto Cd = CaptureBase::emplace<CaptureBase>(F3, "DISTANCE", F3->getTimeStamp());
-    // CaptureBasePtr Cd = std::make_shared<CaptureBase>("DISTANCE", F3->getTimeStamp());
-    // F3->addCapture(Cd);
-    auto fd = FeatureBase::emplace<FeatureBase>(Cd, "DISTANCE", Vector1d(distance), Matrix1d(distance_std * distance_std));
-    // FeatureBasePtr fd = std::make_shared<FeatureBase>("DISTANCE", Vector1d(distance), Matrix1d(distance_std * distance_std));
-    // Cd->addFeature(fd);
-    auto cd = FactorBase::emplace<FactorDistance3d>(fd, fd, F1, nullptr, false, FAC_ACTIVE);
-    // FactorDistance3dPtr cd = std::make_shared<FactorAutodiffDistance3d>(fd, F1, nullptr, false, FAC_ACTIVE);
-    // fd->addFactor(cd);
-    // F1->addConstrainedBy(cd);
-
-    cd->setStatus(FAC_INACTIVE);
-    std::string report = solver->solve(SolverManager::ReportVerbosity::BRIEF);
-    WOLF_DEBUG("DISTANCE CONSTRAINT INACTIVE: \n", report);
-
-    problem->print(1,0,1,0);
-
-    cd->setStatus(FAC_ACTIVE);
-    report = solver->solve(SolverManager::ReportVerbosity::BRIEF);
-
-    // Print results
-    WOLF_DEBUG("DISTANCE CONSTRAINT ACTIVE: \n", report);
-    problem->print(1,0,1,0);
-
-    // Evaluate final states
-    ASSERT_MATRIX_APPROX(F2->getP()->getState(), pos2  , 1e-6);
-    ASSERT_MATRIX_APPROX(F2->getO()->getState(), vquat2, 1e-6);
-    ASSERT_MATRIX_APPROX(F3->getP()->getState(), pos3  , 1e-6);
-    ASSERT_MATRIX_APPROX(F3->getO()->getState(), vquat3, 1e-6);
-
-    Eigen::VectorXd F1_p = F1->getP()->getState();
-    Eigen::VectorXd F1_o = F1->getO()->getState();
-    Eigen::VectorXd F2_p = F2->getP()->getState();
-    Eigen::VectorXd F2_o = F2->getO()->getState();
-    Eigen::VectorXd F3_p = F3->getP()->getState();
-    Eigen::VectorXd F3_o = F3->getO()->getState();
-    Eigen::VectorXd S_p  = S ->getP()->getState();
-    Eigen::VectorXd S_o  = S ->getO()->getState();
-
-    // evaluate residuals
-    Vector3d res;
-    for (size_t i=0; i<cv123.size(); i++)
-    {
-        cv123.at(i)->operator ()(F1_p.data(), F1_o.data(),
-                                 F2_p.data(), F2_o.data(),
-                                 F3_p.data(), F3_o.data(),
-                                 S_p. data(), S_o. data(),
-                                 res.data());
-
-        ASSERT_MATRIX_APPROX(res, Vector3d::Zero(), 1e-6);
-    }
-}
-
-int main(int argc, char **argv)
-{
-    testing::InitGoogleTest(&argc, argv);
-    //    ::testing::GTEST_FLAG(filter) = "FactorTrifocalTest.solve_F1";
-    //    ::testing::GTEST_FLAG(filter) = "FactorTrifocalTest.solve_F2";
-    //    ::testing::GTEST_FLAG(filter) = "FactorTrifocalTest.solve_F3";
-    //    ::testing::GTEST_FLAG(filter) = "FactorTrifocalTest.solve_S";
-    //    ::testing::GTEST_FLAG(filter) = "FactorTrifocalTest.solve_multi_point";
-    //    ::testing::GTEST_FLAG(filter) = "FactorTrifocalMultiPointTest.solve_multi_point_distance";
-    return RUN_ALL_TESTS();
-}
-
diff --git a/test/gtest_feature_point_image.cpp b/test/gtest_feature_point_image.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..ef5c2adb6fc020e86cc228770765662acbdb9f4b
--- /dev/null
+++ b/test/gtest_feature_point_image.cpp
@@ -0,0 +1,76 @@
+//--------LICENSE_START--------
+//
+// Copyright (C) 2020,2021,2022 Institut de Robòtica i Informàtica Industrial, CSIC-UPC.
+// Authors: Joan Solà Ortega (jsola@iri.upc.edu)
+// All rights reserved.
+//
+// This file is part of WOLF
+// WOLF is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with this program.  If not, see <http://www.gnu.org/licenses/>.
+//
+//--------LICENSE_END--------
+
+
+#include <string>
+#include <core/utils/utils_gtest.h>
+#include <core/sensor/sensor_base.h>
+
+#include "vision/feature/feature_point_image.h"
+
+using namespace wolf;
+using namespace cv;
+
+class FeaturePointImage_test : public testing::Test
+{
+    public:
+        cv::KeyPoint cv_kp_;
+        WKeyPoint kp_;
+        Eigen::Vector2d pix_;
+        std::string object_type_;
+        Eigen::Matrix2d cov_;
+        
+        void SetUp() override
+        {
+            cv_kp_ = cv::KeyPoint(0.0, 1.0, 0);
+            kp_ = WKeyPoint(cv_kp_);
+            cov_ = Eigen::Matrix2d::Identity();
+        }
+};
+
+TEST_F(FeaturePointImage_test, constructor)
+{
+    FeaturePointImagePtr f1 = std::make_shared<FeaturePointImage>(kp_, cov_);
+    ASSERT_EQ(f1->getType(), "FeaturePointImage");
+}
+
+TEST_F(FeaturePointImage_test, getter_setters)
+{
+    FeaturePointImagePtr f1 = std::make_shared<FeaturePointImage>(kp_, cov_);
+    ASSERT_EQ(f1->getMeasurement()(0), 0.0);
+
+    cv::KeyPoint cv_kp_bis = cv::KeyPoint(2.0, 3.0, 0);
+    WKeyPoint kp_bis(cv_kp_bis);
+    f1->setKeyPoint(kp_bis);
+
+    // setting the keypoint changes the feature measurement as it should
+    ASSERT_EQ(f1->getMeasurement()(0), 2.0);
+
+}
+
+
+int main(int argc, char **argv)
+{
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
+
diff --git a/test/gtest_processor_tracker_feature_trifocal.cpp b/test/gtest_processor_tracker_feature_trifocal.cpp
deleted file mode 100644
index 3b5117a9b8d990c18a3aa76f49b80972aed24e3a..0000000000000000000000000000000000000000
--- a/test/gtest_processor_tracker_feature_trifocal.cpp
+++ /dev/null
@@ -1,185 +0,0 @@
-//--------LICENSE_START--------
-//
-// Copyright (C) 2020,2021,2022 Institut de Robòtica i Informàtica Industrial, CSIC-UPC.
-// Authors: Joan Solà Ortega (jsola@iri.upc.edu)
-// All rights reserved.
-//
-// This file is part of WOLF
-// WOLF is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with this program.  If not, see <http://www.gnu.org/licenses/>.
-//
-//--------LICENSE_END--------
-
-#include "vision/processor/processor_tracker_feature_trifocal.h"
-#include "vision/capture/capture_image.h"
-#include "vision/sensor/sensor_camera.h"
-#include "vision/internal/config.h"
-
-#include <core/utils/utils_gtest.h>
-#include "core/common/wolf.h"
-#include "core/utils/logging.h"
-#include "core/processor/processor_odom_3d.h"
-
-#include "vision_utils/vision_utils.h"
-
-using namespace Eigen;
-using namespace wolf;
-
-// Use the following in case you want to initialize tests with predefines variables or methods.
-//class ProcessorTrackerFeatureTrifocal_class : public testing::Test{
-//    public:
-//        virtual void SetUp()
-//        {
-//        }
-//};
-
-//TEST(ProcessorTrackerFeatureTrifocal, Constructor)
-//{
-//  std::cout << "\033[1;33m [WARN]:\033[0m gtest for ProcessorTrackerFeatureTrifocal Constructor is empty." << std::endl;
-//}
-
-//TEST(ProcessorTrackerFeatureTrifocal, Destructor)
-//{
-//  std::cout << "\033[1;33m [WARN]:\033[0m gtest for ProcessorTrackerFeatureTrifocal Destructor is empty." << std::endl;
-//}
-
-////[Class methods]
-//TEST(ProcessorTrackerFeatureTrifocal, trackFeatures)
-//{
-//  std::cout << "033[1;33m [WARN]:033[0m gtest for ProcessorTrackerFeatureTrifocal trackFeatures is empty." << std::endl;
-//}
-//
-//TEST(ProcessorTrackerFeatureTrifocal, correctFeatureDrift)
-//{
-//  std::cout << "033[1;33m [WARN]:033[0m gtest for ProcessorTrackerFeatureTrifocal correctFeatureDrift is empty." << std::endl;
-//}
-//
-//TEST(ProcessorTrackerFeatureTrifocal, voteForKeyFrame)
-//{
-//  std::cout << "033[1;33m [WARN]:033[0m gtest for ProcessorTrackerFeatureTrifocal voteForKeyFrame is empty." << std::endl;
-//}
-//
-//TEST(ProcessorTrackerFeatureTrifocal, detectNewFeatures)
-//{
-//  std::cout << "033[1;33m [WARN]:033[0m gtest for ProcessorTrackerFeatureTrifocal detectNewFeatures is empty." << std::endl;
-//}
-//
-//TEST(ProcessorTrackerFeatureTrifocal, createFactor)
-//{
-//  std::cout << "033[1;33m [WARN]:033[0m gtest for ProcessorTrackerFeatureTrifocal createFactor is empty." << std::endl;
-//}
-
-TEST(ProcessorTrackerFeatureTrifocal, KeyFrameCallback)
-{
-
-    using namespace wolf;
-    using std::shared_ptr;
-    using std::make_shared;
-    using std::static_pointer_cast;
-    using Eigen::Vector2d;
-
-    std::string wolf_root = _WOLF_VISION_ROOT_DIR;
-
-    double dt = 0.01;
-
-    // Wolf problem
-    ProblemPtr problem = Problem::create("PO", 3);
-
-    // Install tracker (sensor and processor)
-    ParamsSensorCameraPtr intr = make_shared<ParamsSensorCamera>(); // TODO init params or read from YAML
-    intr->width  = 640;
-    intr->height = 480;
-
-    auto sens_trk = SensorBase::emplace<SensorCamera>(problem->getHardware(), (Eigen::Vector7d()<<0,0,0, 0,0,0,1).finished(),
-                                                      intr);
-    ParamsProcessorTrackerFeatureTrifocalPtr params_tracker_feature_trifocal = std::make_shared<ParamsProcessorTrackerFeatureTrifocal>();
-    params_tracker_feature_trifocal->pixel_noise_std                = 1.0;
-    params_tracker_feature_trifocal->voting_active                  = true;
-    params_tracker_feature_trifocal->min_features_for_keyframe      = 5;
-    params_tracker_feature_trifocal->time_tolerance                 = dt/2;
-    params_tracker_feature_trifocal->min_features_for_keyframe      = 5;
-    params_tracker_feature_trifocal->pixel_noise_std                = 1.0;
-    params_tracker_feature_trifocal->max_new_features               = 5;
-    params_tracker_feature_trifocal->min_response_new_feature       = 25;
-    params_tracker_feature_trifocal->n_cells_h                      = 10;
-    params_tracker_feature_trifocal->n_cells_v                      = 10;
-    params_tracker_feature_trifocal->yaml_file_params_vision_utils  = wolf_root + "/demos/processor_tracker_feature_trifocal_vision_utils.yaml";
-    params_tracker_feature_trifocal->debug_view                     = false;
-
-    // ProcessorTrackerFeatureTrifocalPtr proc_trk = make_shared<ProcessorTrackerFeatureTrifocal>(params_tracker_feature_trifocal);
-    auto proc_trk = std::static_pointer_cast<ProcessorTrackerFeatureTrifocal>(ProcessorBase::emplace<ProcessorTrackerFeatureTrifocal>(sens_trk, params_tracker_feature_trifocal));
-    proc_trk->configure(sens_trk);
-
-    // problem->addSensor(sens_trk);
-    // sens_trk->addProcessor(proc_trk);
-
-    // Install odometer (sensor and processor)
-    ParamsSensorOdom3dPtr params = std::make_shared<ParamsSensorOdom3d>();
-    params->min_disp_var = 0.000001;
-    params->min_rot_var  = 0.000001;
-    SensorBasePtr sens_odo = problem->installSensor("SensorOdom3d", "odometer", (Vector7d() << 0,0,0,  0,0,0,1).finished(), params);
-    ParamsProcessorOdom3dPtr proc_odo_params = make_shared<ParamsProcessorOdom3d>();
-    proc_odo_params->voting_active   = true;
-    proc_odo_params->time_tolerance  = dt/2;
-    proc_odo_params->max_buff_length = 3;
-    ProcessorBasePtr proc_odo = problem->installProcessor("ProcessorOdom3d", "odometer", sens_odo, proc_odo_params);
-
-    std::cout << "sensor & processor created and added to wolf problem" << std::endl;
-
-    // Sequence to test KeyFrame creations (callback calls)
-
-    // initialize
-    TimeStamp   t(0.0);
-    VectorComposite x("PO", {Vector3d::Zero(), Quaterniond::Identity().coeffs()});
-    VectorComposite s("PO", {1e-3*Vector3d::Ones(), 1e-3*Vector3d::Ones()});
-    auto KF1 = problem->setPriorFactor(x, s, t);             // KF1
-    std::static_pointer_cast<ProcessorOdom3d>(proc_odo)->setOrigin(KF1);
-
-    MatrixXd P = (s.vector("PO").array() * s.vector("PO").array()).matrix().asDiagonal();
-    CaptureOdom3dPtr capt_odo = make_shared<CaptureOdom3d>(t, sens_odo, Vector6d::Zero(), P);
-
-    // Track
-    cv::Mat image(intr->height, intr->width, CV_8UC3); // OpenCV cv::Mat(rows, cols)
-    image *= 0; // TODO see if we want to use a real image
-    SensorCameraPtr sens_trk_cam = std::static_pointer_cast<SensorCamera>(sens_trk);
-    CaptureImagePtr capt_trk = make_shared<CaptureImage>(t, sens_trk_cam, image);
-    sens_trk_cam->process(capt_trk);
-
-    problem->print(2,0,1,0);
-
-    for (size_t ii=0; ii<8; ii++ )
-    {
-        // Move
-        t = t+dt;
-        WOLF_INFO("----------------------- ts: ", t , " --------------------------");
-
-        capt_odo->setTimeStamp(t);
-        sens_odo->process(capt_odo);
-
-        // Track
-        capt_trk = make_shared<CaptureImage>(t, sens_trk_cam, image);
-        sens_trk_cam->process(capt_trk);
-
-        problem->print(2,0,1,0);
-
-        // Only odom creating KFs
-//        ASSERT_TRUE( problem->getLastKeyFrame()->getType().compare("PO 3d")==0 );
-    }
-}
-
-int main(int argc, char **argv)
-{
-    testing::InitGoogleTest(&argc, argv);
-    return RUN_ALL_TESTS();
-}
-
diff --git a/test/gtest_processor_visual_odometry.cpp b/test/gtest_processor_visual_odometry.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..0828b2cd8981bbf7ef91e7f09bc739577c28b618
--- /dev/null
+++ b/test/gtest_processor_visual_odometry.cpp
@@ -0,0 +1,421 @@
+//--------LICENSE_START--------
+//
+// Copyright (C) 2020,2021,2022 Institut de Robòtica i Informàtica Industrial, CSIC-UPC.
+// Authors: Joan Solà Ortega (jsola@iri.upc.edu)
+// All rights reserved.
+//
+// This file is part of WOLF
+// WOLF is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with this program.  If not, see <http://www.gnu.org/licenses/>.
+//
+//--------LICENSE_END--------
+/**
+ * \file gtest_processor_visual_odometry.cpp
+ *
+ *  Created on: March 31, 2022
+ *      \author: cdebeunne
+ */
+
+#include <string>
+#include <core/utils/utils_gtest.h>
+#include <core/sensor/sensor_base.h>
+#include <core/yaml/parser_yaml.h>
+#include <opencv2/imgcodecs.hpp>
+
+#include "core/math/rotations.h"
+
+#include "vision/processor/processor_visual_odometry.h"
+#include "vision/sensor/sensor_camera.h"
+#include "vision/factor/factor_pixel_hp.h"
+#include "vision/landmark/landmark_hp.h"
+#include "vision/capture/capture_image.h"
+#include "vision/internal/config.h"
+
+using namespace wolf;
+using namespace cv;
+
+std::string wolf_vision_root = _WOLF_VISION_ROOT_DIR;
+
+class ProcessorVisualOdometryTest : public ProcessorVisualOdometry
+{
+	public:
+
+		ProcessorVisualOdometryTest(ParamsProcessorVisualOdometryPtr& _params_vo):
+		    ProcessorVisualOdometry(_params_vo)
+	    {
+
+	    }
+
+        cv::Ptr<cv::FeatureDetector> getDetector()
+        {
+            return detector_;
+        }
+
+        TrackMatrix getTrackMatrix()
+		{
+			return track_matrix_;
+		}
+
+        void setCaptureLast(CaptureImagePtr capture_image_last){
+            last_ptr_ = capture_image_last;
+            capture_image_last_ = capture_image_last;
+        }
+
+        void setCaptureIncoming(CaptureImagePtr capture_image_incoming){
+            incoming_ptr_ = capture_image_incoming;
+            capture_image_incoming_ = capture_image_incoming;
+        }
+
+        void setCaptureOrigin(CaptureImagePtr capture_image_origin){
+            origin_ptr_ = capture_image_origin;
+            capture_image_origin_ = capture_image_origin;
+        }
+};
+
+TEST(ProcessorVisualOdometry, kltTrack)
+{
+    cv::Mat img = cv::imread(wolf_vision_root + "/test/markers.jpg", cv::IMREAD_GRAYSCALE);
+    cv::Mat img_flow = cv::imread(wolf_vision_root + "/test/markers.jpg", cv::IMREAD_GRAYSCALE);
+    // Create an image with a predefined optical flow
+    int du = 5;
+    int dv = 5;
+    for (int x=0; x<img.size().height; x++){
+        for (int y=0; y<img.size().width; y++){
+            if (x+du < img.size().height && y+dv<img.size().width){
+                img_flow.at<uchar>(x,y) = img.at<uchar>(x+du,y+dv);
+            } else {
+                img_flow.at<uchar>(x,y) = 255;
+            }
+        }
+    }
+
+    // Create a processor
+    ParamsProcessorVisualOdometryPtr params = std::make_shared<ParamsProcessorVisualOdometry>();
+    params->klt.patch_height = 21;
+    params->klt.patch_width = 21;
+    params->klt.nlevels_pyramids = 3;
+    params->klt.max_err = 1.0;
+    params->klt.criteria = cv::TermCriteria(cv::TermCriteria::COUNT+cv::TermCriteria::EPS, 30, 0.01);
+    params->fast.threshold = 30;
+    params->fast.non_max_suppresion = true;
+
+    ProcessorVisualOdometryTest processor(params);
+    cv::Ptr<cv::FeatureDetector> detector = processor.getDetector();
+    
+    std::vector<cv::KeyPoint> kps;
+    detector->detect(img, kps);
+
+
+    // Create WkpMap 
+    KeyPointsMap mwkps, mwkps_flow;
+    for (auto kp : kps){
+        WKeyPoint wkp(kp);
+        mwkps[wkp.getId()] = wkp;
+    }
+    TracksMap tracks_flow = processor.kltTrack(img, img_flow, mwkps, mwkps_flow);
+    Eigen::Vector2d delta(static_cast<float>(du), static_cast<float>(dv));
+    for (auto track : tracks_flow){
+        float du_flow = mwkps[track.first].getCvKeyPoint().pt.x - mwkps_flow[track.second].getCvKeyPoint().pt.x;
+        float dv_flow = mwkps[track.first].getCvKeyPoint().pt.y - mwkps_flow[track.second].getCvKeyPoint().pt.y;
+        Eigen::Vector2d delta_flow(du_flow,dv_flow);
+        ASSERT_MATRIX_APPROX(delta, delta_flow, 0.1);
+    }
+
+}
+
+//TEST(ProcessorVisualOdometry, preProcess)
+//{
+//    // Create an image pair
+//    cv::Mat img_0 = cv::imread(wolf_vision_root + "/test/demo_gazebo_x00cm_y00cm.jpg", cv::IMREAD_GRAYSCALE);
+//    cv::Mat img_1 = cv::imread(wolf_vision_root + "/test/demo_gazebo_x00cm_y-20cm.jpg", cv::IMREAD_GRAYSCALE);
+//
+//    // Create a processor
+//    ParamsProcessorVisualOdometryPtr params = std::make_shared<ParamsProcessorVisualOdometry>();
+//    params->klt_params.patch_height = 21;
+//    params->klt_params.patch_width = 21;
+//    params->klt_params.nlevels_pyramids = 3;
+//    params->klt_params.klt_max_err = 0.2;
+//    params->klt_params.criteria = cv::TermCriteria(cv::TermCriteria::COUNT+cv::TermCriteria::EPS, 30, 0.01);
+//    params->fast_params.threshold_fast = 20;
+//    params->fast_params.non_max_suppresion = true;
+//    params->min_features_for_keyframe = 50;
+//    params->max_nb_tracks = 400;
+//    params->min_track_length_for_landmark = 4;
+//    ProcessorVisualOdometryTest processor(params);
+//
+//
+//    // Install camera
+//    ParamsSensorCameraPtr intr = std::make_shared<ParamsSensorCamera>();
+//    intr->pinhole_model_raw = Eigen::Vector4d(640, 480, 640, 640);
+//    intr->width  = 1280;
+//    intr->height = 960;
+//    SensorCameraPtr cam_ptr = std::make_shared<SensorCamera>((Eigen::Vector7d() << 0,0,0,  0,0,0,1).finished(), intr);
+//    processor.configure(cam_ptr);
+//
+//    // ----------------------------------------
+//    // TIME 0 : Let's process the first capture
+//    // ----------------------------------------
+//
+//    TimeStamp t0(0.0);
+//
+//    // Create Capture
+//    CaptureImagePtr capture_image_incoming = std::make_shared<CaptureImage>(t0, cam_ptr, img_0);
+//    processor.setCaptureIncoming(capture_image_incoming);
+//
+//    // PreProcess
+//    processor.preProcess();
+//
+//    // Kps visu
+//    cv::Mat img_draw;
+//    std::vector<cv::KeyPoint> kps;
+//    for (auto mwkp : capture_image_incoming->getKeyPoints()){
+//        kps.push_back(mwkp.second.getCvKeyPoint());
+//    }
+//    // cv::drawKeypoints(img_0, kps, img_draw);
+//    // cv::imshow( "KeyPoints t = 0", img_draw);
+//    // cv::waitKey(0);
+//
+//    // ----------------------------------------
+//    // TIME 1 : Let's process the other one
+//    // ----------------------------------------
+//
+//    TimeStamp t1(0.1);
+//
+//    // Edit captures
+//    CaptureImagePtr capture_image_last = capture_image_incoming;
+//    capture_image_incoming = std::make_shared<CaptureImage>(t1, cam_ptr, img_1);
+//    processor.setCaptureIncoming(capture_image_incoming);
+//    processor.setCaptureLast(capture_image_last);
+//    processor.setCaptureOrigin(capture_image_last);
+//
+//    processor.preProcess();
+//
+//    // Kps visu
+//    kps.clear();
+//    for (auto mwkp : capture_image_incoming->getKeyPoints()){
+//        kps.push_back(mwkp.second.getCvKeyPoint());
+//    }
+//    // cv::drawKeypoints(img_1, kps, img_draw);
+//    // cv::imshow( "KeyPoints t = 1", img_draw);
+//    // cv::waitKey(0);
+//
+//    // Check if 80% of tracks between frames
+//    float track_ratio = static_cast<float>(capture_image_incoming->getTracksPrev().size()) /
+//                        static_cast<float>(capture_image_incoming->getKeyPoints().size());
+//    ASSERT_TRUE(track_ratio > 0.8);
+//
+//    // Check if tracks_prev and tracks_origin are similar
+//    ASSERT_EQ(capture_image_incoming->getTracksPrev().size(), capture_image_incoming->getTracksOrigin().size());
+//
+//}
+//
+//TEST(ProcessorVisualOdometry, process)
+//{
+//    // Config file to parse. Here is where all the problem is defined:
+//    std::string wolf_vision_root = _WOLF_VISION_ROOT_DIR;
+//    std::string config_file = "test/gtest_visual_odometry.yaml";
+//
+//    // parse file into params server: each param will be retrievable from this params server:
+//    ParserYaml parser       = ParserYaml(config_file, wolf_vision_root);
+//    ParamsServer server     = ParamsServer(parser.getParams());
+//    // Wolf problem: automatically build the left branch of the wolf tree from the contents of the params server:
+//    ProblemPtr problem      = Problem::autoSetup(server);
+//
+//    // Get sensor and processor
+//    SensorCameraPtr cam_ptr = std::dynamic_pointer_cast<SensorCamera>(problem->findSensor("sen cam"));
+//    ProcessorVisualOdometryPtr proc_vo_ptr;
+//    for (auto proc : problem->findSensor("sen cam")->getProcessorList()){
+//        proc_vo_ptr = std::dynamic_pointer_cast<ProcessorVisualOdometry>(proc);
+//    }
+//
+//    // Successive images
+//    cv::Mat img_0 = cv::imread(wolf_vision_root + "/test/demo_gazebo_x00cm_y00cm.jpg", cv::IMREAD_GRAYSCALE);
+//    cv::Mat img_1 = cv::imread(wolf_vision_root + "/test/demo_gazebo_x00cm_y-10cm.jpg", cv::IMREAD_GRAYSCALE);
+//    cv::Mat img_2 = cv::imread(wolf_vision_root + "/test/demo_gazebo_x00cm_y-20cm.jpg", cv::IMREAD_GRAYSCALE);
+//
+//
+//    // ----------------------------------------
+//    // TIME 0 : Let's process the first capture
+//    // ----------------------------------------
+//
+//    TimeStamp t0(0.0);
+//    CaptureImagePtr capture_image_0 = std::make_shared<CaptureImage>(t0, cam_ptr, img_0);
+//    capture_image_0->process();
+//    problem->print(4,0,1,0);
+//
+//    ASSERT_EQ(proc_vo_ptr->getTrackMatrix().numTracks(), 0);
+//	ASSERT_EQ(problem->getMap()->getLandmarkList().size(), 0);
+//
+//    // ----------------------------------------
+//    // TIME 1 : Second image
+//    // ----------------------------------------
+//
+//    TimeStamp t1(0.1);
+//    CaptureImagePtr capture_image_1 = std::make_shared<CaptureImage>(t1, cam_ptr, img_1);
+//    capture_image_1->process();
+//
+//    ASSERT_EQ(proc_vo_ptr->getTrackMatrix().numTracks(),capture_image_1->getTracksPrev().size());
+//    // Check if tracks_prev and tracks_origin are similar
+//    ASSERT_EQ(capture_image_1->getTracksPrev().size(), capture_image_1->getTracksOrigin().size());
+//
+//    // ----------------------------------------
+//    // TIME 3 : Third image
+//    // ----------------------------------------
+//
+//    TimeStamp t2(0.2);
+//    CaptureImagePtr capture_image_2 = std::make_shared<CaptureImage>(t2, cam_ptr, img_2);
+//    capture_image_2->process();
+//
+//    ASSERT_EQ(proc_vo_ptr->getTrackMatrix().numTracks(),capture_image_2->getTracksOrigin().size());
+//}
+
+TEST(ProcessorVisualOdometry, mergeTracks)
+{
+    TracksMap tracks_orig_last, tracks_orig_inco, tracks_last_inco;
+
+    /* tested tracks:
+     * O - L - I
+     * ---------
+     * 1 - 2 - x
+     * 3 - 4 - 5
+     * 6 - 7 - 8
+     * 9 - 10 - x
+     * x - 11 - 12
+     * x - 13 - x
+     * x - x - 14
+     * 15 - x - 16
+     */
+    tracks_orig_last[1] = 2;
+    tracks_orig_last[3] = 4;
+    tracks_orig_last[6] = 7;
+    tracks_orig_last[9] = 10;
+
+    tracks_last_inco[4] = 5;
+    tracks_last_inco[7] = 8;
+    tracks_last_inco[11] = 12;
+
+    tracks_orig_inco = ProcessorVisualOdometry::mergeTracks(tracks_orig_last, tracks_last_inco);
+
+    // correct tracks
+    ASSERT_EQ(tracks_orig_inco.size(), 2);
+    ASSERT_EQ(tracks_orig_inco[3], 5);
+    ASSERT_EQ(tracks_orig_inco[6], 8);
+
+    // tracks that should not be there
+    ASSERT_EQ(tracks_orig_inco.count(1), 0);
+    ASSERT_EQ(tracks_orig_inco.count(9), 0);
+    ASSERT_EQ(tracks_orig_inco.count(15), 0);
+    ASSERT_EQ(tracks_orig_inco.count(11), 0);
+    ASSERT_EQ(tracks_orig_inco.count(13), 0);
+
+}
+
+cv::Point2f project(Eigen::Matrix3f K, Eigen::Vector3f p){
+        Eigen::Vector3f ph = K * p;
+        ph = ph / ph(2,0);
+        return cv::Point2f(ph(0), ph(1));
+}
+
+TEST(ProcessorVisualOdometry, filterWithEssential)
+{
+    KeyPointsMap mwkps_prev, mwkps_curr;
+    TracksMap tracks_prev_curr;
+    cv::Mat E;
+
+    // Create a simple camera model
+    Eigen::Matrix3f K;
+    K << 100, 0, 240,
+         0, 100, 240,
+         0, 0, 1;
+    
+    // Create a processor
+    ParamsProcessorVisualOdometryPtr params = std::make_shared<ParamsProcessorVisualOdometry>();
+    params->grid.margin = 10;
+    params->grid.nbr_cells_h = 8;
+    params->grid.nbr_cells_v = 8;
+    params->grid.separation = 10;
+    params->ransac.prob = 0.999;  // 0.99 makes the gtest fail -> missing one point
+    params->ransac.thresh = 1.0;
+    ProcessorVisualOdometryTest processor(params);
+
+    // Install camera
+    ParamsSensorCameraPtr intr = std::make_shared<ParamsSensorCamera>();
+    intr->pinhole_model_raw = Eigen::Vector4d(100, 100, 240, 240);
+    intr->width  = 480;
+    intr->height = 480;
+    SensorCameraPtr cam_ptr = std::make_shared<SensorCamera>((Eigen::Vector7d() << 0,0,0,  0,0,0,1).finished(), intr);
+    processor.configure(cam_ptr);
+
+    // Set 3D points on the previous view
+    Eigen::Vector3f X1_prev(1.0, 1.0, 2.0);
+    Eigen::Vector3f X2_prev(-1.0, -1.0, 2.0);
+    Eigen::Vector3f X3_prev(-0.75, -0.75, 3.0);
+    Eigen::Vector3f X4_prev(-0.75, 0.75, 2.5);
+    Eigen::Vector3f X5_prev(0.75, -0.75, 2.0);
+    Eigen::Vector3f X6_prev(0.0, 1.0, 2.0);
+    Eigen::Vector3f X7_prev(0.1, -1.0, 3.0);
+    Eigen::Vector3f X8_prev(0.75, 0.75, 2.0);
+
+    // Project pixels in the previous view
+    mwkps_prev.insert(std::pair<size_t, WKeyPoint>(0, WKeyPoint(cv::KeyPoint(project(K,X1_prev), 1))));
+    mwkps_prev.insert(std::pair<size_t, WKeyPoint>(1, WKeyPoint(cv::KeyPoint(project(K,X2_prev), 1))));
+    mwkps_prev.insert(std::pair<size_t, WKeyPoint>(2, WKeyPoint(cv::KeyPoint(project(K,X3_prev), 1))));
+    mwkps_prev.insert(std::pair<size_t, WKeyPoint>(3, WKeyPoint(cv::KeyPoint(project(K,X4_prev), 1))));
+    mwkps_prev.insert(std::pair<size_t, WKeyPoint>(4, WKeyPoint(cv::KeyPoint(project(K,X5_prev), 1))));
+    mwkps_prev.insert(std::pair<size_t, WKeyPoint>(5, WKeyPoint(cv::KeyPoint(project(K,X6_prev), 1))));
+    mwkps_prev.insert(std::pair<size_t, WKeyPoint>(6, WKeyPoint(cv::KeyPoint(project(K,X7_prev), 1))));
+    mwkps_prev.insert(std::pair<size_t, WKeyPoint>(7, WKeyPoint(cv::KeyPoint(project(K,X8_prev), 1))));
+
+    // Set the transformation between the two views
+    Eigen::Vector3f t(0.1, 0.1, 0.0);
+    // Eigen::Vector3f euler(0.0, 0.0, 0.0);  // degenerate case!
+    Eigen::Vector3f euler(0.2, 0.1, 0.3);
+    Eigen::Matrix3f R = e2R(euler);
+    
+
+    // Project pixels in the current view
+    mwkps_curr.insert(std::pair<size_t, WKeyPoint>(0, WKeyPoint(cv::KeyPoint(project(K,R*X1_prev + t), 1))));
+    mwkps_curr.insert(std::pair<size_t, WKeyPoint>(1, WKeyPoint(cv::KeyPoint(project(K,R*X2_prev + t), 1))));
+    mwkps_curr.insert(std::pair<size_t, WKeyPoint>(2, WKeyPoint(cv::KeyPoint(project(K,R*X3_prev + t), 1))));
+    mwkps_curr.insert(std::pair<size_t, WKeyPoint>(3, WKeyPoint(cv::KeyPoint(project(K,R*X4_prev + t), 1))));
+    mwkps_curr.insert(std::pair<size_t, WKeyPoint>(4, WKeyPoint(cv::KeyPoint(project(K,R*X5_prev + t), 1))));
+    mwkps_curr.insert(std::pair<size_t, WKeyPoint>(5, WKeyPoint(cv::KeyPoint(project(K,R*X6_prev + t), 1))));
+    mwkps_curr.insert(std::pair<size_t, WKeyPoint>(6, WKeyPoint(cv::KeyPoint(project(K,R*X7_prev + t), 1))));
+    mwkps_curr.insert(std::pair<size_t, WKeyPoint>(7, WKeyPoint(cv::KeyPoint(project(K,R*X8_prev + t), 1))));  
+
+    // Build the tracksMap
+    for (size_t i=0; i<mwkps_curr.size(); ++i)
+    {
+        tracks_prev_curr.insert(std::pair<size_t, size_t>(i,i));
+    }
+
+    // Let's try FilterWithEssential, all points here are inliers
+    processor.filterWithEssential(mwkps_prev, mwkps_curr, tracks_prev_curr, E);
+    ASSERT_EQ(tracks_prev_curr.size(), mwkps_curr.size());
+
+    // We add here an outlier: a keypoint that doesn't move between the two views
+    mwkps_prev.insert(std::pair<size_t, WKeyPoint>(8, WKeyPoint(cv::KeyPoint(cv::Point2d(120,140), 1))));
+    mwkps_curr.insert(std::pair<size_t, WKeyPoint>(8, WKeyPoint(cv::KeyPoint(cv::Point2d(120,140), 1))));
+    tracks_prev_curr.insert(std::pair<size_t, size_t>(8,8)); 
+
+    // point at 8 must be discarded
+    processor.filterWithEssential(mwkps_prev, mwkps_curr, tracks_prev_curr, E);
+    ASSERT_EQ(tracks_prev_curr.count(8), 0);
+
+}
+
+int main(int argc, char **argv)
+{
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/test/gtest_visual_odometry.yaml b/test/gtest_visual_odometry.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f368ab0157a75f7f44bbcebe092675620d2e766f
--- /dev/null
+++ b/test/gtest_visual_odometry.yaml
@@ -0,0 +1,50 @@
+config:
+
+  problem:
+  
+    dimension:            3               # space is 3d
+    frame_structure:      "PO"            # keyframes have position and orientation
+  
+    prior:
+      mode:               "factor"
+      $state:
+        P: [0,0,0]
+        O: [0,0,0,1]
+      $sigma:
+        P: [0.001, 0.001, 0.001]
+        O: [0.31, 0.31, 0.31]
+      time_tolerance:     0.005
+
+    tree_manager:
+      type: "none"
+      plugin: "core"
+  
+  solver:
+    max_num_iterations: 100
+    verbose: 0
+    period: 0.2
+    interrupt_on_problem_change: false
+    n_threads: 2
+    compute_cov: false
+    minimizer: LEVENBERG_MARQUARDT
+    use_nonmonotonic_steps: false         # only for LEVENBERG_MARQUARDT and DOGLEG
+    function_tolerance: 0.000001
+    gradient_tolerance: 0.0000000001
+    
+  sensors:
+    
+    - type:               "SensorCamera"
+      plugin:             "vision"
+      name:               "sen cam"
+      extrinsic: 
+        pose:             [0,0,0, 0,0,0,1]
+      follow:             "test/camera_gazebo.yaml"         # config parameters in this file
+
+  processors:
+    
+    - type:               "ProcessorVisualOdometry"
+      plugin:             "vision"
+      name:               "prc vo"
+      sensor_name:        "sen cam"                                    # attach processor to this sensor
+      follow:             "test/processor_visual_odometry_gtest.yaml"        # config parameters in this file
+    
\ No newline at end of file
diff --git a/test/markers.jpg b/test/markers.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..aa213f536ee1ca805e12bd8ad9b59f2f0283016d
Binary files /dev/null and b/test/markers.jpg differ
diff --git a/test/processor_visual_odometry_gtest.yaml b/test/processor_visual_odometry_gtest.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..784003cacaaf0a2c6cd36f717eb274d67b56f428
--- /dev/null
+++ b/test/processor_visual_odometry_gtest.yaml
@@ -0,0 +1,56 @@
+time_tolerance: 0.005
+
+keyframe_vote:
+    voting_active: true
+    # Trigger a new keyframe creation as well as detection of new keypoints in last frame
+    # when the track number goes below min_features_for_keyframe in incoming
+    min_features_for_keyframe: 25
+
+# Use a robust cost function
+apply_loss_function: true
+
+# Select the best new Keypoints when performing detection
+max_new_features: 4
+
+####################################
+# ProcessorVisualOdometry parameters
+# FAST KeyPoint detection 
+fast_params:
+    # Threshold on the keypoint pixel intensity (in uchar [0-255]) 
+    # the higher, the more selective the detector is
+    threshold_fast: 10
+    # Avoids getting multiple keypoints at the same place
+    non_max_suppresion: false
+    
+# Lucas Kanade tracking parameters
+klt_params:
+    patch_width:      21
+    patch_height:     21
+    nlevels_pyramids: 3
+    klt_max_err:      0.2
+
+# tesselation grid    
+grid_params:
+    # number of cells used by the active search grid data structure
+    nbr_cells_h: 6  # horizontal
+    nbr_cells_v: 5  # vertical
+    # minimum margin of the region of interest from the edges of the image 
+    margin: 10
+    # reduce the size of each region of interest by n pixels to prevent keypoints from being too close
+    separation: 10
+
+ransac_params:
+    # specifies a desirable level of confidence (probability) that the estimated matrix is correct
+    ransac_prob: 0.999
+    # maximum distance from a point to an epipolar line in pixels, beyond which the point 
+    # is considered an outlier and is not used for computing the final fundamental matrix
+    ransac_thresh: 1
+
+# Keep the number of tracks below 
+max_nb_tracks: 30
+
+# standard deviation of the pixel reprojection factor 
+std_pix: 1
+
+# before creating a landmark, wait until the track is old enough
+min_track_length_for_landmark: 20
diff --git a/wolf_scripts/create_plugin.sh b/wolf_scripts/create_plugin.sh
deleted file mode 100755
index b1ca42f945f7f559a70e43377412faa8e6d4ec9c..0000000000000000000000000000000000000000
--- a/wolf_scripts/create_plugin.sh
+++ /dev/null
@@ -1,126 +0,0 @@
-#!/bin/bash
-# $1 path to the root of the plugin
-# $2 name of the plugin
-# $3 files to be moved
-
-#Generate the necessary dirs
-# if [ ! -d $1 ];
-# then
-#     mkdir $1
-# fi
-
-# if [ ! -d $1/include/$2 ];
-# then
-#     # mkdir $1/include
-#     mkdir $1/include/$2
-# fi
-# if [ ! -d $1/src ];
-# then
-#     mkdir $1/src
-# fi
-root_dir=$(echo $1 | rev | cut -d '/' -f 2- | rev)
-if [ ! -d $root_dir/$2 ];
-then
-    cp -a ../Skeleton $root_dir
-    mv $root_dir/Skeleton $root_dir/$2
-    mv $root_dir/$2/include/skeleton $root_dir/$2/include/$2
-fi
-
-for folder in capture factor feature landmark processor sensor yaml ; do
-    if [ ! -d $1/include/$2/$folder ];
-    then
-        mkdir $1/include/$2/$folder
-    fi
-    if [ ! -d $1/src/$folder ];
-    then
-        mkdir $1/src/$folder
-    fi
-done
-for file in $(cat $3); do
-    head=$(echo $file | cut -d '/' -f 1)
-    if [ "$head" = "include" ];
-    then
-        folder=$(echo $file | cut -d '/' -f 3)
-        suffix=$(echo $file | cut -d '/' -f 4-)
-        line=$(ag "HDRS_"${folder^^} $1/CMakeLists.txt | cut -d ':' -f 1 | head -1)
-        line=$(($line + 1))
-        echo $line " && " $file " && " $folder " && " $suffix
-        if [ "$suffix" = "" ];
-        then
-            line=$(ag "HDRS_BASE" $1/CMakeLists.txt | cut -d ':' -f 1 | head -1)
-            line=$(($line + 1))
-            suffix=$folder
-            sed  -i ""$line"i $head/$2/$suffix" $1/CMakeLists.txt
-            cp $file $1/$head/$2/$suffix
-        else
-            sed  -i ""$line"i $head/$2/$folder/$suffix" $1/CMakeLists.txt
-            cp $file $1/$head/$2/$folder/$suffix
-        fi
-    elif [ "$head" = "src" ];
-    then
-        folder=$(echo $file | cut -d '/' -f 2)
-        suffix=$(echo $file | cut -d '/' -f 3-)
-        # ag "SRCS_"$folder $1/CMakeLists.txt
-        line=$(ag "SRCS_"${folder^^} $1/CMakeLists.txt | cut -d ':' -f 1 | head -1)
-        line=$(($line + 1))
-        echo $line " && " $file " && " $folder " && " $suffix
-        if [ "$suffix" = "" ];
-        then
-            line=$(ag "SRCS_BASE" $1/CMakeLists.txt | cut -d ':' -f 1 | head -1)
-            line=$(($line + 1))
-            suffix=$folder
-            sed  -i ""$line"i $file" $1/CMakeLists.txt
-            cp $file $1/$head/$suffix
-        else
-            sed  -i ""$line"i $file" $1/CMakeLists.txt
-            cp $file $1/$head/$folder/$suffix
-        fi
-    else
-        cp $file $1/$file
-    fi
-done
-for f in $(cat $3); do
-    hhead=$(echo $f | cut -d '/' -f 1)
-    if [ "$hhead" = "include" ];
-    then
-        ffolder=$(echo $f | cut -d '/' -f 3)
-        ssuffix=$(echo $f | cut -d '/' -f 4-)
-        inc=$ffolder/$ssuffix
-    else
-        continue
-    fi
-    for ff in $(cat $3); do
-        head=$(echo $ff | cut -d '/' -f 1)
-        if [ "$head" = "include" ];
-        then
-            folder=$(echo $ff | cut -d '/' -f 3)
-            suffix=$(echo $ff | cut -d '/' -f 4-)
-            if [ "$suffix" = "" ];
-            then
-                new_path=$1/$head/$2/$folder
-                sed -i -E "s@(#include[[:space:]]+\")base(\/$inc\")@\1$2\2@g" $new_path
-            else
-                new_path=$1/$head/$2/$folder/$suffix
-                sed -i -E "s@(#include[[:space:]]+\")base(\/$inc\")@\1$2\2@g" $new_path
-            fi
-            # sed -n -E "s@(#include[[:space:]]+\")base(\/$inc\")@\1$2\2@pg" $new_path
-            # sed -i -E "s@(#include[[:space:]]+\")base(\/$inc\")@\1$2\2@g" $new_path
-        elif [ "$head" = "src" ];
-        then
-            folder=$(echo $ff | cut -d '/' -f 2)
-            suffix=$(echo $ff | cut -d '/' -f 3-)
-            # sed -n -E "s@(#include[[:space:]]+\")base(\/$inc\")@\1$2\2@pg" $new_path
-            if [ "$suffix" = "" ];
-            then
-                new_path=$1/$head/$folder
-                sed -i -E "s@(#include[[:space:]]+\")base(\/$inc\")@\1$2\2@g" $new_path
-            else
-                new_path=$1/$head/$folder/$suffix
-                sed -i -E "s@(#include[[:space:]]+\")base(\/$inc\")@\1$2\2@g" $new_path
-            fi
-        else
-            # sed -n -E "s@(#include[[:space:]]+\")base(\/$inc\")@\1$2\2@pg" $new_path
-            sed -i -E "s@(#include[[:space:]]+\")base(\/$inc\")@\1$2\2@g" $1/$ff
-        fi
-    done
-done
\ No newline at end of file