diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 35b51df5c2a92c0a230bf07f1c111421c290a551..049443c6c84fdc068728b49613f9eb017e16c4a8 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -299,7 +299,7 @@ if(BUILD_TESTS)
 endif()
 
 ## Examples ##
-IF(BUILD_EXAMPLES)
+IF(BUILD_EXAMPLES AND YAMLCPP_FOUND)
   MESSAGE("Building examples.")
   ADD_SUBDIRECTORY(examples)
-ENDIF(BUILD_EXAMPLES)
+ENDIF(BUILD_EXAMPLES AND YAMLCPP_FOUND)
diff --git a/src/algorithms/trackfeatures/alg_trackfeatures.cpp b/src/algorithms/trackfeatures/alg_trackfeatures.cpp
index d7afc214bb2db274e7b8a57f26d32369f5a992d9..57646dd0a1b356d49115164e52a8e92ba3534a60 100644
--- a/src/algorithms/trackfeatures/alg_trackfeatures.cpp
+++ b/src/algorithms/trackfeatures/alg_trackfeatures.cpp
@@ -127,7 +127,7 @@ void AlgorithmTRACKFEATURES::trackFrame(const FramePtr& _frame_old, FramePtr& _t
                 	cv::line(image_draw_, tracked_kp.pt, target_kps[ii].pt, cv::Scalar(0, 0, 255), 3);
                 	cv::circle(image_draw_, tracked_kp.pt, 5, cv::Scalar(255.0, 255.0, 0.0), -1, 8, 0);
                 	cv::putText(image_draw_, std::to_string(ii), tracked_kp.pt, cv:: FONT_HERSHEY_SIMPLEX, 0.4, cv::Scalar(0, 0, 0));
-                 }
+                }
             }
         }
     }
diff --git a/src/detectors/orb/detector_orb.h b/src/detectors/orb/detector_orb.h
index 6315703b48a121c041b8df15aae7823fcd22b6c3..06eb7db08d2fc8751d4123acdee04621d5ba0de4 100644
--- a/src/detectors/orb/detector_orb.h
+++ b/src/detectors/orb/detector_orb.h
@@ -15,7 +15,7 @@ VU_PTR_TYPEDEFS(DetectorParamsORB);
  */
 struct DetectorParamsORB: public ParamsBase
 {
-	int nfeatures = 20;					// The maximum number of features to retain.
+	int nfeatures = 200;					// The maximum number of features to retain.
 	float scaleFactor = 1.2f;  				// Pyramid decimation ratio, greater than 1. scaleFactor==2 means the classical pyramid, where each next level has 4x less pixels than the previous, but such a big scale factor will degrade feature matching scores dramatically. On the other hand, too close to 1 scale factor will mean that to cover certain scale range you will need more pyramid levels and so the speed will suffer.
 	int nlevels = 8;						// The number of pyramid levels. The smallest level will have linear size equal to input_image_linear_size/pow(scaleFactor, nlevels).
 	int edgeThreshold = 16;					// This is size of the border where the features are not detected. It should roughly match the patchSize parameter.
diff --git a/src/examples/CMakeLists.txt b/src/examples/CMakeLists.txt
index 97ce106ecf8e673481cee9b5767a15e61498b226..92893c4106c1a42002e9b24388febcb44ab47d15 100644
--- a/src/examples/CMakeLists.txt
+++ b/src/examples/CMakeLists.txt
@@ -1,19 +1,35 @@
-# create an example application
+# Factories
 ADD_EXECUTABLE(test_factories test_factories.cpp )
+TARGET_LINK_LIBRARIES(test_factories ${PROJECT_NAME})
+
+# Sensor
 ADD_EXECUTABLE(test_sensor test_sensor.cpp)
+TARGET_LINK_LIBRARIES(test_sensor vision_utils)
+
+# Detector
 ADD_EXECUTABLE(test_detector test_detector.cpp)
+TARGET_LINK_LIBRARIES(test_detector vision_utils)
+
+# Descriptor
 ADD_EXECUTABLE(test_descriptor test_descriptor.cpp)
+TARGET_LINK_LIBRARIES(test_descriptor vision_utils)
+
+# Matcher
 ADD_EXECUTABLE(test_matcher test_matcher.cpp)
+TARGET_LINK_LIBRARIES(test_matcher vision_utils)
+
+# Alg: optical flow
 ADD_EXECUTABLE(test_algorithm_opticalflowpyrlk test_algorithm_opticalflowpyrlk.cpp)
+TARGET_LINK_LIBRARIES(test_algorithm_opticalflowpyrlk vision_utils)
+
+# Alg: track features
 ADD_EXECUTABLE(test_algorithm_trackfeatures test_algorithm_trackfeatures.cpp)
+TARGET_LINK_LIBRARIES(test_algorithm_trackfeatures vision_utils)
+
+# Alg: active search
 ADD_EXECUTABLE(test_algorithm_activesearch test_algorithm_activesearch.cpp)
+TARGET_LINK_LIBRARIES(test_algorithm_activesearch vision_utils)
 
-# link necessary libraries
-TARGET_LINK_LIBRARIES(test_factories ${PROJECT_NAME})
-TARGET_LINK_LIBRARIES(test_sensor vision_utils)
-TARGET_LINK_LIBRARIES(test_detector vision_utils)
-TARGET_LINK_LIBRARIES(test_descriptor vision_utils)
-TARGET_LINK_LIBRARIES(test_matcher vision_utils)
-TARGET_LINK_LIBRARIES(test_algorithm_opticalflowpyrlk vision_utils)
-TARGET_LINK_LIBRARIES(test_algorithm_trackfeatures vision_utils)
-TARGET_LINK_LIBRARIES(test_algorithm_activesearch vision_utils)
\ No newline at end of file
+# eg: Fundamental matrix
+ADD_EXECUTABLE(fundamental_matrix fundamental_matrix.cpp)
+TARGET_LINK_LIBRARIES(fundamental_matrix vision_utils)
\ No newline at end of file
diff --git a/src/examples/fundamental_matrix.cpp b/src/examples/fundamental_matrix.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..95ec8a76fee4340f9af1d0155f89072dd2284b12
--- /dev/null
+++ b/src/examples/fundamental_matrix.cpp
@@ -0,0 +1,196 @@
+/*
+ * fundamental_matrix.cpp
+ *
+ *  Created on: Apr 24, 2016
+ *      Author: jsola
+ */
+
+// Testing things for the 3D image odometry
+// general includes
+#include "unistd.h"
+#include <time.h>
+
+// Vision utils
+#include "../vision_utils.h"
+#include "../detectors.h"
+#include "../descriptors.h"
+#include "../matchers.h"
+#include "../common_class/buffer.h"
+
+// General includes
+#include <cmath>
+#include <complex>      // std::complex, std::norm
+
+//std includes
+#include <iostream>
+
+int main(int argc, char** argv)
+{
+    std::cout << std::endl << " ========= OpenCv test ===========" << std::endl << std::endl;
+
+    // parsing input params
+    const char * filename;
+    cv::VideoCapture capture;
+    if (argc < 2)
+    {
+        std::cout << "Please use\n\t./test_opencv <arg> "
+                "\nwith "
+                "\n\targ = <path/videoname.mp4> for video input "
+                "\n\targ = 0 for camera input." << std::endl;
+        return 0;
+    }
+    else if (std::string(argv[1]) == "0")
+    {
+        filename = "0"; // camera
+        capture.open(0);
+        std::cout << "Input stream from camera " << std::endl;
+    }
+    else
+    {
+        filename = argv[1]; // provided through argument
+        capture.open(filename);
+        std::cout << "Input video file: " << filename << std::endl;
+    }
+
+    // Open input stream
+    if (!capture.isOpened())  // check if we succeeded
+        std::cout << "failed" << std::endl;
+    else
+        std::cout << "succeded" << std::endl;
+
+    // set image detector
+    vision_utils::DetectorParamsORBPtr det_params = std::make_shared<vision_utils::DetectorParamsORB>();
+    vision_utils::DetectorBasePtr det_b_ptr = vision_utils::setupDetector("ORB", "ORB detector", det_params);
+    vision_utils::DetectorORBPtr det_ptr = std::static_pointer_cast<vision_utils::DetectorORB>(det_b_ptr);
+
+    // set image descriptor
+    vision_utils::DescriptorParamsORBPtr desc_params = std::make_shared<vision_utils::DescriptorParamsORB>();
+    vision_utils::DescriptorBasePtr des_b_ptr = vision_utils::setupDescriptor("ORB", "ORB detector", desc_params);
+    vision_utils::DescriptorORBPtr des_ptr = std::static_pointer_cast<vision_utils::DescriptorORB>(des_b_ptr);
+
+    // set matcher
+    vision_utils::MatcherParamsBasePtr mat_params = std::make_shared<vision_utils::MatcherParamsBase>();
+    mat_params->match_type = vision_utils::MATCH;
+    std::string matcher_name = "BRUTEFORCE_HAMMING_2";
+	vision_utils::MatcherBasePtr mat_b_ptr = vision_utils::setupMatcher(matcher_name, matcher_name, mat_params);
+	vision_utils::MatcherBRUTEFORCE_HAMMING_2Ptr mat_ptr = std::static_pointer_cast<vision_utils::MatcherBRUTEFORCE_HAMMING_2>(mat_b_ptr);
+
+
+    // set and print image properties
+    unsigned int img_width = (unsigned int)capture.get(CV_CAP_PROP_FRAME_WIDTH);
+    unsigned int img_height = (unsigned int)capture.get(CV_CAP_PROP_FRAME_HEIGHT);
+    std::cout << "Image size: " << img_width << "x" << img_height << std::endl;
+
+    // set the image buffer
+    unsigned int buffer_size = 5;
+    vision_utils::Buffer<cv::Mat> image_buffer(buffer_size);
+    cv::Mat img;
+    capture >> img; image_buffer.add(img);
+
+    // user interface
+    cv::namedWindow("Feature tracker"); // Creates a window for display.
+    cv::moveWindow("Feature tracker", 0, 0);
+
+
+    // First image
+    KeyPointVector keypoints_1 = det_ptr->detect(image_buffer.back());
+    cv::Mat descriptors_1 = des_ptr->getDescriptor( image_buffer.back(), keypoints_1);
+
+    while(1)
+    {
+    	capture >> img;
+
+    	// declare all variables
+        DMatchVector matches;
+        cv::Mat inliers_bool;
+        cv::Mat img_matches;
+        cv::Mat img_scaled;
+
+        PointVector matched_1, matched_2;
+        DMatchVector good_matches, inlier_matches;
+        PointVector inliers_1, inliers_2;
+
+        // detect and describe in new image
+        KeyPointVector keypoints_2 = det_ptr->detect(img);
+
+        if (keypoints_2.size()>0)
+        {
+        	image_buffer.add(img);
+
+        	cv::Mat descriptors_2 = des_ptr->getDescriptor( image_buffer.back(), keypoints_2);
+
+        	unsigned int max_dist = 0;
+        	unsigned int min_dist = 512;
+
+        	if (keypoints_1.size() * keypoints_2.size() != 0)
+        	{
+        		// match (try flann later)
+        		//-- Step 3: Matching descriptor vectors using FLANN matcher
+        		mat_ptr->match(descriptors_1,descriptors_2,matches);
+
+        		//-- Quick calculation of max and min distances between keypoints
+        		for (unsigned int ii = 0; ii < matches.size(); ii++)
+        		{
+        			unsigned int dist = matches[ii].distance;
+        			if (dist < min_dist)
+        				min_dist = dist;
+        			if (dist > max_dist)
+        			max_dist = dist;
+        		}
+        	}
+        	else
+        	{
+        		min_dist = 999;
+        		max_dist = 999;
+        	}
+
+        	printf("-- Max dist : %d \n", max_dist);
+        	printf("-- Min dist : %d \n", min_dist);
+
+        	//-- Select only "good" matches (i.e. those at a distance which is small)
+        	for (unsigned int ii = 0; ii < matches.size(); ii++)
+        	{
+        		if (matches[ii].distance <= 50) //std::max(2*min_dist, 0.02) )
+        		{
+        			good_matches.push_back(matches[ii]);
+        			matched_1.push_back(keypoints_1[matches[ii].queryIdx].pt);
+        			matched_2.push_back(keypoints_2[matches[ii].trainIdx].pt);
+        		}
+        	}
+
+        	// Compute the Fundamental matrix F and discard outliers through ransac in one go
+        	if (good_matches.size() > 20) // Minimum is 8 points, but we need more for robustness
+        	{
+        		// find fundamental matrix using RANSAC, return set of inliers as bool vector
+        		cv::Mat F = findFundamentalMat(matched_1, matched_2, cv::FM_RANSAC, 3.0, 0.98, inliers_bool);
+
+        		std::cout << "F = " << F << std::endl;
+
+        		// Recover the inlier matches
+        		for (unsigned int ii = 0; ii < good_matches.size(); ii++)
+        			if (inliers_bool.at<uchar>(ii) == 1)
+        				inlier_matches.push_back(good_matches[ii]);
+        	}
+
+        	std::cout << "Matches: initial " << matches.size() << " | good " << good_matches.size() << " | inliers "
+        			<< inlier_matches.size() << std::endl;
+
+        	std::cout << "+++++++++++++++" << std::endl;
+        	std::cout << keypoints_1.size() << " " << keypoints_2.size() << " " << inlier_matches.size() << " " << img_matches.size() << std::endl;
+
+
+        	// Draw RANSAC inliers
+        	cv::drawMatches(image_buffer[image_buffer.size()-2], keypoints_1, image_buffer.back(), keypoints_2, inlier_matches, img_matches,
+        			cv::Scalar::all(-1), cv::Scalar::all(-1), std::vector<char>(), 0); //cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
+
+        	// Show detected matches
+        	imshow("Feature tracker", img_matches);
+        	cv::waitKey(1);
+
+        	keypoints_1.clear();
+			keypoints_1	= keypoints_2;
+        	descriptors_1 = descriptors_2;
+        }
+    }
+}
+
diff --git a/src/examples/yaml/ACTIVESEARCH.yaml b/src/examples/yaml/ACTIVESEARCH.yaml
index 96d8b31d24d51dbdf9918a6a74d67083a83d02e2..e8227db2f00ee4c87d4d9d5c61924a1f8ae8aa7f 100644
--- a/src/examples/yaml/ACTIVESEARCH.yaml
+++ b/src/examples/yaml/ACTIVESEARCH.yaml
@@ -3,25 +3,25 @@ sensor:
 
 detector:
   type: "ORB"         
-  nfeatures: 100
+  nfeatures: 200
   scale factor: 1.2
   nlevels: 8
   edge threshold: 8   # 16
   first level: 0 
   WTA_K: 2            # See: http://docs.opencv.org/trunk/db/d95/classcv_1_1ORB.html#a180ae17d3300cf2c619aa240d9b607e5
-  score type: 1       #enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 };
-  patch size: 15      # 31
+  score type: 0       #enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 };
+  patch size: 16      # 31
 
 descriptor:
   type: "ORB"         
-  nfeatures: 100
+  nfeatures: 200
   scale factor: 1.2
   nlevels: 8
   edge threshold: 8   # 16
   first level: 0 
   WTA_K: 2            # See: http://docs.opencv.org/trunk/db/d95/classcv_1_1ORB.html#a180ae17d3300cf2c619aa240d9b607e5
-  score type: 1       #enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 };
-  patch size: 15      # 31
+  score type: 0       #enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 };
+  patch size: 16      # 31
 
 matcher:
   type: "FLANNBASED"
@@ -35,6 +35,6 @@ algorithm:
   margin: 0
   separation: 5
   min features to track: 20
-  max new features: 100
+  max new features: 200
   min response new features: 80
   matcher min normalized score: 0.85
\ No newline at end of file
diff --git a/src/examples/yaml/OPTFLOWPYRLK.yaml b/src/examples/yaml/OPTFLOWPYRLK.yaml
index 76cd227b266267f9e8940b05a72a905f0914576d..1217beb8ab79213d88f682abeb351ee1ba5a6fe1 100644
--- a/src/examples/yaml/OPTFLOWPYRLK.yaml
+++ b/src/examples/yaml/OPTFLOWPYRLK.yaml
@@ -9,8 +9,8 @@ detector:
   edge threshold: 8   # 16
   first level: 0 
   WTA_K: 2            # See: http://docs.opencv.org/trunk/db/d95/classcv_1_1ORB.html#a180ae17d3300cf2c619aa240d9b607e5
-  score type: 1       #enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 };
-  patch size: 15      # 31
+  score type: 0       #enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 };
+  patch size: 16      # 31
   
 algorithm:
   type: "OPTFLOWPYRLK" 
diff --git a/src/examples/yaml/ORB.yaml b/src/examples/yaml/ORB.yaml
index ce729bd363cd7c9baa68d261c3a3c3135f0a1c1b..a5eac6ad686c50e79978d3b7d5c0cc95a85de409 100644
--- a/src/examples/yaml/ORB.yaml
+++ b/src/examples/yaml/ORB.yaml
@@ -9,8 +9,8 @@ detector:
   edge threshold: 8   # 16
   first level: 0 
   WTA_K: 2            # See: http://docs.opencv.org/trunk/db/d95/classcv_1_1ORB.html#a180ae17d3300cf2c619aa240d9b607e5
-  score type: 1       #enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 };
-  patch size: 15      # 31
+  score type: 0       #enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 };
+  patch size: 16      # 31
 
 descriptor:
   type: "ORB"         
@@ -20,7 +20,7 @@ descriptor:
   edge threshold: 8   # 16
   first level: 0 
   WTA_K: 2            # See: http://docs.opencv.org/trunk/db/d95/classcv_1_1ORB.html#a180ae17d3300cf2c619aa240d9b607e5
-  score type: 1       #enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 };
-  patch size: 15      # 31
+  score type: 0       #enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 };
+  patch size: 16      # 31
     
     
diff --git a/src/test/gtest_descriptors.cpp b/src/test/gtest_descriptors.cpp
index 7d04457b206385b7c2ce4702d73cfd0f719368db..0d94bc7ffa32e0d9d49349046c541a41f3548b4d 100644
--- a/src/test/gtest_descriptors.cpp
+++ b/src/test/gtest_descriptors.cpp
@@ -58,7 +58,7 @@ TEST(Descriptors, BRIEF)
     vision_utils::DetectorBasePtr det_b_ptr = vision_utils::setupDetector(det_name, det_name + " detector", params);
     vision_utils::DetectorAKAZEPtr det_ptr = std::static_pointer_cast<vision_utils::DetectorAKAZE>(det_b_ptr);
     std::vector<cv::KeyPoint> keypoints = det_ptr->detect(image);
-    ASSERT_EQ(keypoints.size(), 17);
+    ASSERT_EQ(keypoints.size(), 135);
 
     vision_utils::DescriptorParamsBRIEFPtr des_params = std::make_shared<vision_utils::DescriptorParamsBRIEF>();
     std::string des_name = "BRIEF";
@@ -108,7 +108,7 @@ TEST(Descriptors, DAISY)
     vision_utils::DetectorBasePtr det_b_ptr = vision_utils::setupDetector(det_name, det_name + " detector", params);
     vision_utils::DetectorAKAZEPtr det_ptr = std::static_pointer_cast<vision_utils::DetectorAKAZE>(det_b_ptr);
     std::vector<cv::KeyPoint> keypoints = det_ptr->detect(image);
-    ASSERT_EQ(keypoints.size(), 17);
+    ASSERT_EQ(keypoints.size(), 135);
 
     vision_utils::DescriptorParamsDAISYPtr des_params = std::make_shared<vision_utils::DescriptorParamsDAISY>();
     std::string des_name = "DAISY";
@@ -133,7 +133,7 @@ TEST(Descriptors, FREAK)
     vision_utils::DetectorBasePtr det_b_ptr = vision_utils::setupDetector(det_name, det_name + " detector", params);
     vision_utils::DetectorAKAZEPtr det_ptr = std::static_pointer_cast<vision_utils::DetectorAKAZE>(det_b_ptr);
     std::vector<cv::KeyPoint> keypoints = det_ptr->detect(image);
-    ASSERT_EQ(keypoints.size(), 17);
+    ASSERT_EQ(keypoints.size(), 135);
 
     vision_utils::DescriptorParamsFREAKPtr des_params = std::make_shared<vision_utils::DescriptorParamsFREAK>();
     std::string des_name = "FREAK";
@@ -183,7 +183,7 @@ TEST(Descriptors, LATCH)
     vision_utils::DetectorBasePtr det_b_ptr = vision_utils::setupDetector(det_name, det_name + " detector", params);
     vision_utils::DetectorAKAZEPtr det_ptr = std::static_pointer_cast<vision_utils::DetectorAKAZE>(det_b_ptr);
     std::vector<cv::KeyPoint> keypoints = det_ptr->detect(image);
-    ASSERT_EQ(keypoints.size(), 17);
+    ASSERT_EQ(keypoints.size(), 135);
 
     vision_utils::DescriptorParamsLATCHPtr des_params = std::make_shared<vision_utils::DescriptorParamsLATCH>();
     std::string des_name = "LATCH";
@@ -208,7 +208,7 @@ TEST(Descriptors, LUCID)
     vision_utils::DetectorBasePtr det_b_ptr = vision_utils::setupDetector(det_name, det_name + " detector", params);
     vision_utils::DetectorAKAZEPtr det_ptr = std::static_pointer_cast<vision_utils::DetectorAKAZE>(det_b_ptr);
     std::vector<cv::KeyPoint> keypoints = det_ptr->detect(image);
-    ASSERT_EQ(keypoints.size(), 20);
+    ASSERT_EQ(keypoints.size(), 200);
 
     vision_utils::DescriptorParamsLUCIDPtr des_params = std::make_shared<vision_utils::DescriptorParamsLUCID>();
     std::string des_name = "LUCID";
@@ -233,7 +233,7 @@ TEST(Descriptors, ORB)
     vision_utils::DetectorBasePtr det_b_ptr = vision_utils::setupDetector(det_name, det_name + " detector", params);
     vision_utils::DetectorORBPtr det_ptr = std::static_pointer_cast<vision_utils::DetectorORB>(det_b_ptr);
     std::vector<cv::KeyPoint> keypoints = det_ptr->detect(image);
-    ASSERT_EQ(keypoints.size(), 17);
+    ASSERT_EQ(keypoints.size(), 135);
 
     vision_utils::DescriptorParamsORBPtr des_params = std::make_shared<vision_utils::DescriptorParamsORB>();
     std::string des_name = "ORB";
diff --git a/src/test/gtest_sensor_usb_cam.cpp b/src/test/gtest_sensor_usb_cam.cpp
index 810f9aa9a1a963435fd151680235c054f01ff67b..48e121b461205df2ac092ffee54e948dc80d096c 100644
--- a/src/test/gtest_sensor_usb_cam.cpp
+++ b/src/test/gtest_sensor_usb_cam.cpp
@@ -4,12 +4,19 @@
 
 std::string sen_name = "USB_CAM";
 std::string vu_root = _VU_ROOT_DIR;
-std::string yaml_file_params = vu_root + "/src/test/data/base_classes.yaml";
+
 int cam_num = 0;
-vision_utils::SensorBasePtr sen_b_ptr = vision_utils::setupSensor(sen_name, sen_name, yaml_file_params);
 
 TEST(SensorUsbCam, constructor)
 {
+	vision_utils::IntrinsicsCameraPtr params = std::make_shared<vision_utils::IntrinsicsCamera>();
+	std::string sen_name = "USB_CAM";
+	vision_utils::SensorBasePtr sen_b_ptr = vision_utils::setupSensor(sen_name, sen_name, params);
+#ifdef USING_YAML
+	std::string yaml_file_params = vu_root + "/src/test/data/base_classes.yaml";
+	sen_name = vision_utils::readYamlType(yaml_file_params, "sensor");
+	sen_b_ptr = vision_utils::setupSensor(sen_name, sen_name, yaml_file_params);
+#endif
 	vision_utils::SensorCameraPtr sen_ptr = std::static_pointer_cast<vision_utils::SensorCamera>(sen_b_ptr);
     ASSERT_TRUE(sen_ptr!=NULL);
     ASSERT_TRUE(sen_name.compare(sen_ptr->getName() ) == 0);
@@ -17,6 +24,14 @@ TEST(SensorUsbCam, constructor)
 
 TEST(SensorUsbCam, getImage)
 {
+	vision_utils::IntrinsicsCameraPtr params = std::make_shared<vision_utils::IntrinsicsCamera>();
+	std::string sen_name = "USB_CAM";
+	vision_utils::SensorBasePtr sen_b_ptr = vision_utils::setupSensor(sen_name, sen_name, params);
+#ifdef USING_YAML
+	std::string yaml_file_params = vu_root + "/src/test/data/base_classes.yaml";
+	sen_name = vision_utils::readYamlType(yaml_file_params, "sensor");
+	sen_b_ptr = vision_utils::setupSensor(sen_name, sen_name, yaml_file_params);
+#endif
 	vision_utils::SensorCameraPtr sen_ptr = std::static_pointer_cast<vision_utils::SensorCamera>(sen_b_ptr);
     std::cout << "             ";
 	if (sen_ptr->open(cam_num))