
Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Target project: mobile_robotics/wolf_projects/wolf_lib/wolf
Commits on Source (110)
Showing with 8 additions and 10858 deletions
@@ -29,6 +29,7 @@ before_script:
- cd build
- ls
- cmake -DCMAKE_CXX_FLAGS="${CMAKE_CXX_FLAGS} -fPIC" -DSPDLOG_BUILD_TESTING=OFF ..
- make -j$(nproc)
- make install
- cd ../..
@@ -54,6 +55,7 @@ before_script:
- cd build
- ls
- cmake -DCMAKE_CXX_FLAGS="${CMAKE_CXX_FLAGS} -fPIC" -DYAML_CPP_BUILD_TESTS=OFF ..
- make -j$(nproc)
- make install
- cd ../..
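# Presumably, -fPIC is needed so these statically built dependencies can later be
# linked into wolf's own shared library; the *_BUILD_TESTING / *_BUILD_TESTS=OFF
# switches skip each dependency's test suite, and make -j$(nproc) parallelizes the
# build over all available cores.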
......
@@ -167,6 +167,7 @@ SET(HDRS_COMMON
include/core/common/params_base.h
)
SET(HDRS_MATH
include/core/math/SE2.h
include/core/math/SE3.h
include/core/math/rotations.h
)
@@ -215,7 +216,7 @@ SET(HDRS_CAPTURE
include/core/capture/capture_pose.h
include/core/capture/capture_velocity.h
include/core/capture/capture_void.h
include/core/capture/capture_wheel_joint_position.h
include/core/capture/capture_diff_drive.h
)
SET(HDRS_FACTOR
include/core/factor/factor_analytic.h
@@ -234,8 +235,8 @@ SET(HDRS_FACTOR
)
SET(HDRS_FEATURE
include/core/feature/feature_base.h
include/core/feature/feature_diff_drive.h
include/core/feature/feature_match.h
include/core/feature/feature_motion.h
include/core/feature/feature_odom_2D.h
include/core/feature/feature_pose.h
)
@@ -244,9 +245,6 @@ SET(HDRS_LANDMARK
include/core/landmark/landmark_match.h
)
SET(HDRS_PROCESSOR
include/core/processor/autoconf_processor_factory.h
include/core/processor/diff_drive_tools.h
include/core/processor/diff_drive_tools.hpp
include/core/processor/motion_buffer.h
include/core/processor/processor_base.h
include/core/processor/processor_diff_drive.h
@@ -262,7 +260,6 @@ SET(HDRS_PROCESSOR
include/core/processor/track_matrix.h
)
SET(HDRS_SENSOR
include/core/sensor/autoconf_sensor_factory.h
include/core/sensor/sensor_base.h
include/core/sensor/sensor_diff_drive.h
include/core/sensor/sensor_factory.h
@@ -324,7 +321,7 @@ SET(SRCS_CAPTURE
src/capture/capture_pose.cpp
src/capture/capture_velocity.cpp
src/capture/capture_void.cpp
src/capture/capture_wheel_joint_position.cpp
src/capture/capture_diff_drive.cpp
)
SET(SRCS_FACTOR
src/factor/factor_analytic.cpp
@@ -332,7 +329,7 @@ SET(SRCS_FACTOR
)
SET(SRCS_FEATURE
src/feature/feature_base.cpp
src/feature/feature_diff_drive.cpp
src/feature/feature_motion.cpp
src/feature/feature_odom_2D.cpp
src/feature/feature_pose.cpp
)
@@ -369,6 +366,7 @@ SET(SRCS_SOLVER
)
SET(SRCS_YAML
src/yaml/processor_odom_3D_yaml.cpp
src/yaml/sensor_odom_2D_yaml.cpp
src/yaml/sensor_odom_3D_yaml.cpp
)
#OPTIONALS
......
sensor:
  type: "USB_CAM"
detector:
  type: "ORB"
  nfeatures: 100
  scale factor: 1.2
  nlevels: 8
  edge threshold: 8 # 16
  first level: 0
  WTA_K: 2 # See: http://docs.opencv.org/trunk/db/d95/classcv_1_1ORB.html#a180ae17d3300cf2c619aa240d9b607e5
  score type: 1 # enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 };
  patch size: 15 # 31
descriptor:
  type: "ORB"
  nfeatures: 100
  scale factor: 1.2
  nlevels: 8
  edge threshold: 8 # 16
  first level: 0
  WTA_K: 2 # See: http://docs.opencv.org/trunk/db/d95/classcv_1_1ORB.html#a180ae17d3300cf2c619aa240d9b607e5
  score type: 1 # enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 };
  patch size: 15 # 31
matcher:
  type: "FLANNBASED"
  match type: 1 # Match type. MATCH = 1, KNNMATCH = 2, RADIUSMATCH = 3
  roi:
    width: 20
    height: 20
  min normalized score: 0.85
algorithm:
  type: "ACTIVESEARCH"
  draw results: false
  grid horiz cells: 12
  grid vert cells: 8
  separation: 10
  min features to track: 5
  max new features: 40
  min response new features: 80
\ No newline at end of file
# Enable Yaml config files
# Building and populating the wolf tree from .graph file (TORO)
#ADD_EXECUTABLE(test_wolf_imported_graph test_wolf_imported_graph.cpp)
#TARGET_LINK_LIBRARIES(test_wolf_imported_graph ${PROJECT_NAME})
# Comparing analytic and autodiff odometry factors
#ADD_EXECUTABLE(test_analytic_autodiff_factor test_analytic_autodiff_factor.cpp)
#TARGET_LINK_LIBRARIES(test_analytic_autodiff_factor ${PROJECT_NAME})
\ No newline at end of file
image_width: 640
image_height: 480
camera_name: mono
camera_matrix:
  rows: 3
  cols: 3
  data: [809.135074, 0.000000, 335.684471, 0.000000, 809.410030, 257.352121, 0.000000, 0.000000, 1.000000]
distortion_model: plumb_bob
distortion_coefficients:
  rows: 1
  cols: 5
  data: [-0.416913, 0.264210, -0.000221, -0.000326, 0.000000]
rectification_matrix:
  rows: 3
  cols: 3
  data: [1.000000, 0.000000, 0.000000, 0.000000, 1.000000, 0.000000, 0.000000, 0.000000, 1.000000]
projection_matrix:
  rows: 3
  cols: 4
  data: [753.818405, 0.000000, 337.203047, 0.000000, 0.000000, 777.118492, 258.102663, 0.000000, 0.000000, 0.000000, 1.000000, 0.000000]
image_width: 640
image_height: 480
camera_name: mono
camera_matrix:
  rows: 3
  cols: 3
  data: [809.135074, 0.000000, 335.684471, 0.000000, 809.410030, 257.352121, 0.000000, 0.000000, 1.000000]
distortion_model: plumb_bob
distortion_coefficients:
  rows: 1
  cols: 5
  data: [-0.416913, 0.264210, 0, 0, 0]
rectification_matrix:
  rows: 3
  cols: 3
  data: [1.000000, 0.000000, 0.000000, 0.000000, 1.000000, 0.000000, 0.000000, 0.000000, 1.000000]
projection_matrix:
  rows: 3
  cols: 4
  data: [753.818405, 0.000000, 337.203047, 0.000000, 0.000000, 777.118492, 258.102663, 0.000000, 0.000000, 0.000000, 1.000000, 0.000000]
image_width: 640
image_height: 480
#camera_name: narrow_stereo
camera_name: camera
camera_matrix:
  rows: 3
  cols: 3
  data: [711.687376, 0.000000, 323.306816, 0.000000, 710.933260, 232.005822, 0.000000, 0.000000, 1.000000]
distortion_model: plumb_bob
distortion_coefficients:
  rows: 1
  cols: 5
  data: [0.067204, -0.141639, 0, 0, 0]
# data: [0.067204, -0.141639, 0.004462, -0.000636, 0.000000]
rectification_matrix:
  rows: 3
  cols: 3
  data: [1.000000, 0.000000, 0.000000, 0.000000, 1.000000, 0.000000, 0.000000, 0.000000, 1.000000]
projection_matrix:
  rows: 3
  cols: 4
  data: [718.941772, 0.000000, 323.016804, 0.000000, 0.000000, 717.174622, 233.475721, 0.000000, 0.000000, 0.000000, 1.000000, 0.000000]
image_width: 640
image_height: 480
camera_name: narrow_stereo
camera_matrix:
  rows: 3
  cols: 3
  data: [872.791604, 0, 407.599166, 0, 883.154343, 270.343971, 0, 0, 1]
distortion_model: plumb_bob
distortion_coefficients:
  rows: 1
  cols: 5
  data: [-0.284384, -0.030014, -0.01554, -0.003363, 0]
rectification_matrix:
  rows: 3
  cols: 3
  data: [1, 0, 0, 0, 1, 0, 0, 0, 1]
projection_matrix:
  rows: 3
  cols: 4
  data: [826.19989, 0, 413.746093, 0, 0, 863.790649, 267.937027, 0, 0, 0, 1, 0]
\ No newline at end of file
image_width: 2
image_height: 2
camera_name: canonical
camera_matrix:
  rows: 3
  cols: 3
  data: [1, 0, 0, 0, 1, 0, 0, 0, 1]
distortion_model: none
distortion_coefficients:
  rows: 1
  cols: 5
  data: [0, 0, 0, 0, 0]
rectification_matrix:
  rows: 3
  cols: 3
  data: [1, 0, 0, 0, 1, 0, 0, 0, 1]
projection_matrix:
  rows: 3
  cols: 4
  data: [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0]
\ No newline at end of file
image_width: 640
image_height: 480
camera_name: camera
camera_matrix:
  rows: 3
  cols: 3
  data: [392.796383, 0, 350.175772, 0, 392.779002, 255.209917, 0, 0, 1]
distortion_model: plumb_bob
distortion_coefficients:
  rows: 1
  cols: 5
  data: [-0.3, 0.096, 0, 0, 0]
rectification_matrix:
  rows: 3
  cols: 3
  data: [1, 0, 0, 0, 1, 0, 0, 0, 1]
projection_matrix:
  rows: 3
  cols: 4
  data: [313.834106, 0, 361.199457, 0, 0, 349.145264, 258.654166, 0, 0, 0, 1, 0]
image_width: 640
image_height: 480
camera_name: narrow_stereo
camera_matrix:
  rows: 3
  cols: 3
  data: [402.860630, 0.000000, 350.628016, 0.000000, 403.220300, 269.746108, 0.000000, 0.000000, 1.000000]
distortion_model: plumb_bob
distortion_coefficients:
  rows: 1
  cols: 5
  data: [-0.276945, 0.095681, 0.000000, 0.000000, -0.013371]
rectification_matrix:
  rows: 3
  cols: 3
  data: [1.000000, 0.000000, 0.000000, 0.000000, 1.000000, 0.000000, 0.000000, 0.000000, 1.000000]
projection_matrix:
  rows: 3
  cols: 4
  data: [323.792358, 0.000000, 363.187868, 0.000000, 0.000000, 357.040344, 276.369440, 0.000000, 0.000000, 0.000000, 1.000000, 0.000000]
image_width: 640
image_height: 480
camera_name: camera
camera_matrix:
  rows: 3
  cols: 3
  data: [320, 0, 320, 0, 320, 240, 0, 0, 1]
distortion_model: plumb_bob
distortion_coefficients:
  rows: 1
  cols: 5
  data: [0, 0, 0, 0, 0]
rectification_matrix:
  rows: 3
  cols: 3
  data: [1, 0, 0, 0, 1, 0, 0, 0, 1]
projection_matrix:
  rows: 3
  cols: 4
  data: [320, 0, 320, 0, 0, 320, 240, 0, 0, 0, 1, 0]
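# With this ideal calibration (fx = fy = 320, cx = 320, cy = 240, zero distortion),
# projection reduces to u = 320 + 320 * X/Z and v = 240 + 320 * Y/Z, so a point at
# 45 deg off the optical axis (X/Z = 1) lands exactly at the image border u = 640.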
demos/demo_ORB.png (10.7 KiB)
/**
* \file test_apriltag.cpp
*
* Created on: Dec 14, 2018
* \author: Dinesh Atchtuhan
*/
//Wolf
#include "core/wolf.h"
#include "core/rotations.h"
#include "core/problem.h"
#include "core/ceres_wrapper/ceres_manager.h"
#include "core/sensor/sensor_camera.h"
#include "core/processor/processor_tracker_landmark_apriltag.h"
#include "core/capture/capture_image.h"
#include "core/feature/feature_apriltag.h"
// opencv
#include <opencv2/imgproc/imgproc.hpp>
#include "opencv2/opencv.hpp"
// Eigen
#include <Eigen/Core>
#include <Eigen/Geometry>
// std
#include <iostream>
#include <stdlib.h>
void draw_apriltag(cv::Mat image, std::vector<cv::Point2d> corners, int thickness=1, bool draw_corners=false);
int main(int argc, char *argv[])
{
/*
* HOW TO USE?
* For now, just call the executable and append the list of images to be processed.
* The images must be placed in the root folder of your wolf project.
* Ex:
* ./test_apriltag frame1.jpg frame2.jpg frame3.jpg
*/
using namespace wolf;
// General execution options
const bool APPLY_CONTRAST = false;
const bool IMAGE_OUTPUT = true;
const bool USEMAP = false;
WOLF_INFO( "==================== processor apriltag test ======================" )
std::string wolf_root = _WOLF_ROOT_DIR;
// Wolf problem
ProblemPtr problem = Problem::create("PO", 3);
ceres::Solver::Options options;
options.function_tolerance = 1e-6;
options.max_num_iterations = 100;
CeresManagerPtr ceres_manager = std::make_shared<CeresManager>(problem, options);
WOLF_INFO( "==================== Configure Problem ======================" )
Eigen::Vector7s cam_extrinsics; cam_extrinsics << 0,0,0, 0,0,0,1;
SensorBasePtr sen = problem->installSensor("CAMERA", "camera", cam_extrinsics, wolf_root + "/src/examples/camera_logitech_c300_640_480.yaml");
// SensorBasePtr sen = problem->installSensor("CAMERA", "camera", cam_extrinsics, wolf_root + "/src/examples/camera_Dinesh_LAAS_params_notangentrect.yaml");
SensorCameraPtr sen_cam = std::static_pointer_cast<SensorCamera>(sen);
ProcessorBasePtr prc = problem->installProcessor("TRACKER LANDMARK APRILTAG", "apriltags", "camera", wolf_root + "/src/examples/processor_tracker_landmark_apriltag.yaml");
if (USEMAP){
problem->loadMap(wolf_root + "/src/examples/maps/map_apriltag_logitech_1234.yaml");
for (auto lmk : problem->getMap()->getLandmarkList()){
lmk->fix();
}
}
// set prior
Eigen::Matrix6s covariance = Matrix6s::Identity();
Scalar std_m;
Scalar std_deg;
if (USEMAP){
std_m = 100; // standard deviation on original translation
std_deg = 180; // standard deviation on original rotation
}
else {
std_m = 0.00001; // standard deviation on original translation
std_deg = 0.00001; // standard deviation on original rotation
}
covariance.topLeftCorner(3,3) = std_m*std_m * covariance.topLeftCorner(3,3);
covariance.bottomRightCorner(3,3) = (M_TORAD*std_deg)*(M_TORAD*std_deg) * covariance.bottomRightCorner(3,3);
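// The resulting prior covariance is block-diagonal:
//   covariance = diag( std_m^2 * I3 , (M_TORAD * std_deg)^2 * I3 )
// e.g. with the USEMAP values, std_deg = 180 gives a rotation sigma of pi rad,
// i.e. an essentially uninformative orientation prior.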
if (USEMAP){
FrameBasePtr F1 = problem->setPrior((Vector7s()<<0.08, 0.15, -0.75, 0, 0, 0, 1).finished(), covariance, 0.0, 0.1);
}
else {
FrameBasePtr F1 = problem->setPrior((Vector7s()<<0,0,0,0,0,0,1).finished(), covariance, 0.0, 0.1);
F1->fix();
}
// first argument is the name of the program.
// following arguments are path to image (from wolf_root)
const int inputs = argc -1;
WOLF_DEBUG("nb of images: ", inputs);
cv::Mat frame;
Scalar ts(0);
Scalar dt = 1;
WOLF_INFO( "==================== Main loop ======================" )
for (int input = 1; input <= inputs; input++) {
std::string path = wolf_root + "/" + argv[input];
frame = cv::imread(path, CV_LOAD_IMAGE_COLOR);
if( frame.data ){ //if imread succeeded
if (APPLY_CONTRAST){
Scalar alpha = 2.0; // to tune contrast [1-3]
int beta = 0; // to tune lightness [0-100]
// Do the operation new_image(i,j) = alpha*image(i,j) + beta
for( int y = 0; y < frame.rows; y++ ){
for( int x = 0; x < frame.cols; x++ ){
for( int c = 0; c < 3; c++ ){
frame.at<cv::Vec3b>(y,x)[c] = cv::saturate_cast<uchar>( alpha*( frame.at<cv::Vec3b>(y,x)[c] ) + beta );
}
}
}
}
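// Note: the same saturating per-channel transform can be written in one call as
// frame.convertTo(frame, -1, alpha, beta); (rtype = -1 keeps the input depth).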
CaptureImagePtr cap = std::make_shared<CaptureImage>(ts, sen_cam, frame);
// cap->setType(argv[input]); // only for problem->print() to show img filename
cap->setName(argv[input]);
WOLF_DEBUG("Processing image...", path);
sen->process(cap);
if (IMAGE_OUTPUT){
cv::namedWindow( cap->getName(), cv::WINDOW_NORMAL );// Create a window for display.
}
}
else
WOLF_WARN("could not load image ", path);
ts += dt;
}
if (IMAGE_OUTPUT){
WOLF_INFO( "==================== Draw all detections ======================" )
for (auto F : problem->getTrajectory()->getFrameList())
{
if (F->isKey())
{
for (auto cap : F->getCaptureList())
{
if (cap->getType() == "IMAGE")
{
auto img = std::static_pointer_cast<CaptureImage>(cap);
for (FeatureBasePtr f : img->getFeatureList())
{
FeatureApriltagPtr fa = std::static_pointer_cast<FeatureApriltag>(f);
draw_apriltag(img->getImage(), fa->getTagCorners(), 1);
}
cv::imshow( img->getName(), img->getImage() ); // display original image.
cv::waitKey(1);
}
}
}
}
}
// WOLF_INFO( "==================== Provide perturbed prior ======================" )
// for (auto kf : problem->getTrajectory()->getFrameList())
// {
// Vector7s x;
// if (kf->isKey())
// {
// x.setRandom();
// x.tail(4).normalize();
// kf->setState(x);
// }
// }
WOLF_INFO( "==================== Solve problem ======================" )
std::string report = ceres_manager->solve(SolverManager::ReportVerbosity::FULL); // 0: nothing, 1: BriefReport, 2: FullReport
WOLF_DEBUG(report);
problem->print(3,0,1,1);
WOLF_INFO("============= SOLVED PROBLEM : POS | EULER (DEG) ===============")
for (auto kf : problem->getTrajectory()->getFrameList())
{
if (kf->isKey())
for (auto cap : kf->getCaptureList())
{
if (cap->getType() != "POSE")
{
Vector3s T = kf->getP()->getState();
Vector4s qv= kf->getO()->getState();
Vector3s e = M_TODEG * R2e(q2R(qv));
WOLF_DEBUG("KF", kf->id(), " => ", T.transpose(), " | ", e.transpose());
}
}
}
for (auto lmk : problem->getMap()->getLandmarkList())
{
Vector3s T = lmk->getP()->getState();
Vector4s qv= lmk->getO()->getState();
Vector3s e = M_TODEG * R2e(q2R(qv));
WOLF_DEBUG(" L", lmk->id(), " => ", T.transpose(), " | ", e.transpose());
}
// ===============================================
// COVARIANCES ===================================
// ===============================================
// Print COVARIANCES of all states
WOLF_INFO("======== COVARIANCES OF SOLVED PROBLEM : POS | QUAT =======")
ceres_manager->computeCovariances(SolverManager::CovarianceBlocksToBeComputed::ALL_MARGINALS);
for (auto kf : problem->getTrajectory()->getFrameList())
if (kf->isKey())
{
Eigen::MatrixXs cov = kf->getCovariance();
WOLF_DEBUG("KF", kf->id(), "_std (sigmas) = ", cov.diagonal().transpose().array().sqrt());
}
for (auto lmk : problem->getMap()->getLandmarkList())
{
Eigen::MatrixXs cov = lmk->getCovariance();
WOLF_DEBUG(" L", lmk->id(), "_std (sigmas) = ", cov.diagonal().transpose().array().sqrt());
}
std::cout << std::endl;
// ===============================================
// SAVE MAP TO YAML ==============================
// ===============================================
//
// problem->saveMap(wolf_root + "/src/examples/map_apriltag_set3_HC.yaml", "set3");
if (IMAGE_OUTPUT){
cv::waitKey(0);
cv::destroyAllWindows();
}
return 0;
}
void draw_apriltag(cv::Mat image, std::vector<cv::Point2d> corners,
int thickness, bool draw_corners) {
cv::line(image, corners[0], corners[1], CV_RGB(255, 0, 0), thickness);
cv::line(image, corners[1], corners[2], CV_RGB(0, 255, 0), thickness);
cv::line(image, corners[2], corners[3], CV_RGB(0, 0, 255), thickness);
cv::line(image, corners[3], corners[0], CV_RGB(255, 0, 255), thickness);
///////
// Possible starting points for other displays
///////
// const auto line_type = cv::LINE_AA;
// if (draw_corners) {
// int r = thickness;
// cv::circle(image, cv::Point2i(p[0].x, p[0].y), r, CV_RGB(255, 0, 0), -1,
// line_type);
// cv::circle(image, cv::Point2i(p[1].x, p[1].y), r, CV_RGB(0, 255, 0), -1,
// line_type);
// cv::circle(image, cv::Point2i(p[2].x, p[2].y), r, CV_RGB(0, 0, 255), -1,
// line_type);
// cv::circle(image, cv::Point2i(p[3].x, p[3].y), r, CV_RGB(255, 0, 255), -1,
// line_type);
// }
// cv::putText(image, std::to_string(apriltag.id),
// cv::Point2f(apriltag.center.x - 5, apriltag.center.y + 5),
// cv::FONT_HERSHEY_SIMPLEX, 1, CV_RGB(255, 0, 255), 2, line_type);
}
//void DrawApriltags(cv::Mat &image, const ApriltagVec &apriltags) {
// for (const auto &apriltag : apriltags) {
//// DrawApriltag(image, apriltag);
// DrawApriltag(image, apriltag, 1);
// }
//}
#include "core/math/pinhole_tools.h"
#include "core/landmark/landmark_AHP.h"
#include "core/factor/factor_AHP.h"
#include "core/state_block/state_block.h"
#include "core/state_block/state_quaternion.h"
#include "core/sensor/sensor_camera.h"
#include "core/capture/capture_image.h"
int main()
{
using namespace wolf;
std::cout << std::endl << "==================== factor AHP test ======================" << std::endl;
TimeStamp t = 1;
//=====================================================
// Environment variable for configuration files
std::string wolf_root = _WOLF_ROOT_DIR;
std::cout << wolf_root << std::endl;
//=====================================================
// Wolf problem
ProblemPtr wolf_problem_ptr_ = Problem::create("PO", 3);
/* Do this as long as there are no extrinsic parameters in the yaml */
Eigen::Vector7s extrinsic_cam;
extrinsic_cam.setRandom();
// extrinsic_cam[0] = 0; //px
// extrinsic_cam[1] = 0; //py
// extrinsic_cam[2] = 0; //pz
// extrinsic_cam[3] = 0; //qx
// extrinsic_cam[4] = 0; //qy
// extrinsic_cam[5] = 0; //qz
// extrinsic_cam[6] = 1; //qw
extrinsic_cam.tail<4>().normalize();
std::cout << "========extrinsic_cam: " << extrinsic_cam.transpose() << std::endl;
const Eigen::VectorXs extr = extrinsic_cam;
/* Do this as long as there are no extrinsic parameters in the yaml */
SensorBasePtr sensor_ptr = wolf_problem_ptr_->installSensor("CAMERA", "PinHole", extr, wolf_root + "/src/examples/camera_params_ueye_radial_dist.yaml");
SensorCameraPtr camera_ptr_ = std::static_pointer_cast<SensorCamera>(sensor_ptr);
// PROCESSOR
// one-liner API
ProcessorBasePtr processor_ptr = wolf_problem_ptr_->installProcessor("IMAGE LANDMARK", "ORB", "PinHole", wolf_root + "/src/examples/processor_image_feature.yaml");
// create the current frame
Eigen::Vector7s frame_pos_ori;
frame_pos_ori.setRandom();
// frame_pos_ori[0] = 0; //px
// frame_pos_ori[1] = 0; //py
// frame_pos_ori[2] = 0; //pz
// frame_pos_ori[3] = 0; //qx
// frame_pos_ori[4] = 0; //qy
// frame_pos_ori[5] = 0; //qz
// frame_pos_ori[6] = 1; //qw
frame_pos_ori.tail<4>().normalize();
const Eigen::VectorXs frame_val = frame_pos_ori;
FrameBasePtr last_frame = std::make_shared<FrameBase>(t,std::make_shared<StateBlock>(frame_val.head(3)), std::make_shared<StateQuaternion>(frame_val.tail(4)));
std::cout << "Last frame" << std::endl;
wolf_problem_ptr_->getTrajectory()->addFrame(last_frame);
// Capture
CaptureImagePtr image_ptr;
t.setToNow();
cv::Mat frame; // may need an actual image here
image_ptr = std::make_shared< CaptureImage>(t, camera_ptr_, frame);
last_frame->addCapture(image_ptr);
// create the feature
cv::KeyPoint kp; kp.pt = {10,20};
cv::Mat desc;
FeaturePointImagePtr feat_point_image_ptr = std::make_shared<FeaturePointImage>(kp, 0, desc, Eigen::Matrix2s::Identity());
image_ptr->addFeature(feat_point_image_ptr);
FrameBasePtr anchor_frame = std::make_shared< FrameBase>(t,std::make_shared<StateBlock>(frame_val.head(3)), std::make_shared<StateQuaternion>(frame_val.tail(4)));
//FrameBasePtr anchor_frame = wolf_problem_ptr_->getTrajectory()->getLastFrame();
// create the landmark
Eigen::Vector2s point2D;
point2D[0] = feat_point_image_ptr->getKeypoint().pt.x;
point2D[1] = feat_point_image_ptr->getKeypoint().pt.y;
std::cout << "point2D: " << point2D.transpose() << std::endl;
Scalar distance = 2; // arbitrary value
Eigen::Vector4s vec_homogeneous;
Eigen::VectorXs correction_vec = (std::static_pointer_cast<SensorCamera>(image_ptr->getSensor()))->getCorrectionVector();
std::cout << "correction vector: " << correction_vec << std::endl;
std::cout << "distortion vector: " << (std::static_pointer_cast<SensorCamera>(image_ptr->getSensor()))->getDistortionVector() << std::endl;
point2D = pinhole::depixellizePoint(image_ptr->getSensor()->getIntrinsic()->getState(),point2D);
std::cout << "point2D depixellized: " << point2D.transpose() << std::endl;
point2D = pinhole::undistortPoint((std::static_pointer_cast<SensorCamera>(image_ptr->getSensor()))->getCorrectionVector(),point2D);
std::cout << "point2D undistorted: " << point2D.transpose() << std::endl;
Eigen::Vector3s point3D;
point3D.head(2) = point2D;
point3D(2) = 1;
point3D.normalize();
std::cout << "point3D normalized: " << point3D.transpose() << std::endl;
vec_homogeneous = {point3D(0),point3D(1),point3D(2),1/distance};
std::cout << "vec_homogeneous: " << vec_homogeneous.transpose() << std::endl;
LandmarkAHPPtr landmark = std::make_shared<LandmarkAHP>(vec_homogeneous, anchor_frame, image_ptr->getSensor(), feat_point_image_ptr->getDescriptor());
std::cout << "Landmark AHP created" << std::endl;
// Create the factor
FactorAHPPtr factor_ptr = std::make_shared<FactorAHP>(feat_point_image_ptr, std::static_pointer_cast<LandmarkAHP>(landmark), processor_ptr);
feat_point_image_ptr->addFactor(factor_ptr);
std::cout << "Factor AHP created" << std::endl;
Eigen::Vector2s point2D_proj = point3D.head<2>()/point3D(2);
std::cout << "point2D projected: " << point2D_proj.transpose() << std::endl;
Eigen::Vector2s point2D_dist;
point2D_dist = pinhole::distortPoint((std::static_pointer_cast<SensorCamera>(image_ptr->getSensor()))->getDistortionVector(),point2D_proj);
std::cout << "point2D distorted: " << point2D_dist.transpose() << std::endl;
Eigen::Vector2s point2D_pix;
point2D_pix = pinhole::pixellizePoint(image_ptr->getSensor()->getIntrinsic()->getState(),point2D_dist);
std::cout << "point2D pixellized: " << point2D_pix.transpose() << std::endl;
Eigen::Vector2s expectation;
Eigen::Vector3s current_frame_p = last_frame->getP()->getState();
Eigen::Vector4s current_frame_o = last_frame->getO()->getState();
Eigen::Vector3s anchor_frame_p = landmark->getAnchorFrame()->getP()->getState();
Eigen::Vector4s anchor_frame_o = landmark->getAnchorFrame()->getO()->getState();
Eigen::Vector4s landmark_ = landmark->getP()->getState();
factor_ptr ->expectation(current_frame_p.data(), current_frame_o.data(),
anchor_frame_p.data(), anchor_frame_o.data(),
landmark_.data(), expectation.data());
// current_frame p; current_frame o; anchor_frame p; anchor_frame o; homogeneous_vector landmark, residual
std::cout << "expectation computed" << std::endl;
std::cout << "expectation = " << expectation[0] << " " << expectation[1] << std::endl;
return 0;
}
//std
#include <iostream>
#include "core/processor/processor_tracker_landmark_image.h"
//Wolf
#include "core/common/wolf.h"
#include "core/problem/problem.h"
#include "core/state_block/state_block.h"
#include "core/processor/processor_odom_3D.h"
#include "core/sensor/sensor_odom_3D.h"
#include "core/sensor/sensor_camera.h"
#include "core/capture/capture_image.h"
#include "core/capture/capture_pose.h"
#include "core/ceres_wrapper/ceres_manager.h"
// Vision utils includes
#include <vision_utils.h>
#include <sensors.h>
#include <common_class/buffer.h>
#include <common_class/frame.h>
using Eigen::Vector3s;
using Eigen::Vector4s;
using Eigen::Vector6s;
using Eigen::Vector7s;
using namespace wolf;
void cleanupMap(const ProblemPtr& _problem, const TimeStamp& _t, Scalar _dt_max,
SizeEigen _min_factors)
{
std::list<LandmarkBasePtr> lmks_to_remove;
for (auto lmk : _problem->getMap()->getLandmarkList())
{
TimeStamp t0 = std::static_pointer_cast<LandmarkAHP>(lmk)->getAnchorFrame()->getTimeStamp();
if (_t - t0 > _dt_max)
if (lmk->getConstrainedByList().size() <= _min_factors)
lmks_to_remove.push_back(lmk);
}
for (auto lmk : lmks_to_remove)
{
WOLF_DEBUG("Clean up L" , lmk->id() );
lmk->remove();
}
}
Eigen::MatrixXs computeDataCovariance(const VectorXs& _data)
{
Scalar k = 0.5;
Scalar dist = _data.head<3>().norm();
if ( dist == 0 ) dist = 1.0;
WOLF_DEBUG("dist: ", dist, "; sigma: ", sqrt(k* (dist + 0.1)) );
return k * (dist + 0.1) * Matrix6s::Identity();
}
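// Example: for an odometry step of norm 1 m this returns 0.5 * (1 + 0.1) * I6
// = 0.55 * I6, i.e. a per-component sigma of sqrt(0.55) ~= 0.74 that grows with
// the distance travelled.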
int main(int argc, char** argv)
{
std::cout << std::endl << "==================== processor image landmark test ======================" << std::endl;
// Sensor or sensor recording
vision_utils::SensorCameraPtr sen_ptr = vision_utils::askUserSource(argc, argv);
if (sen_ptr==NULL)
return 0;
unsigned int buffer_size = 10;
vision_utils::Buffer<vision_utils::FramePtr> frame_buff(buffer_size);
frame_buff.add( vision_utils::setFrame(sen_ptr->getImage(), 0) );
unsigned int img_width = frame_buff.back()->getImage().cols;
unsigned int img_height = frame_buff.back()->getImage().rows;
std::cout << "Image size: " << img_width << "x" << img_height << std::endl;
//=====================================================
// Environment variable for configuration files
std::string wolf_root = _WOLF_ROOT_DIR;
std::cout << wolf_root << std::endl;
//=====================================================
//=====================================================
// Wolf problem
ProblemPtr problem = Problem::create("PO", 3);
// ODOM SENSOR AND PROCESSOR
SensorBasePtr sensor_base = problem->installSensor("ODOM 3D", "odom", (Vector7s()<<0,0,0,0,0,0,1).finished(), wolf_root + "/src/examples/sensor_odom_3D.yaml");
SensorOdom3DPtr sensor_odom = std::static_pointer_cast<SensorOdom3D>(sensor_base);
ProcessorBasePtr processor_base = problem->installProcessor("ODOM 3D", "odometry integrator", "odom", wolf_root + "/src/examples/processor_odom_3D.yaml");
// CAMERA SENSOR AND PROCESSOR
sensor_base = problem->installSensor("CAMERA", "PinHole", (Vector7s()<<0,0,0,0,0,0,1).finished(), wolf_root + "/src/examples/camera_params_ueye_sim.yaml");
SensorCameraPtr camera = std::static_pointer_cast<SensorCamera>(sensor_base);
camera->setImgWidth(img_width);
camera->setImgHeight(img_height);
ProcessorTrackerLandmarkImagePtr prc_img_ptr = std::static_pointer_cast<ProcessorTrackerLandmarkImage>( problem->installProcessor("IMAGE LANDMARK", "ORB", "PinHole", wolf_root + "/src/examples/processor_image_feature.yaml") );
//=====================================================
//=====================================================
// Origin Key Frame is fixed
TimeStamp t = 0;
FrameBasePtr origin_frame = problem->emplaceFrame(KEY, (Vector7s()<<1,0,0,0,0,0,1).finished(), t);
problem->getProcessorMotion()->setOrigin(origin_frame);
origin_frame->fix();
std::cout << "t: " << 0 << " \t\t\t x = ( " << problem->getCurrentState().transpose() << ")" << std::endl;
std::cout << "--------------------------------------------------------------" << std::endl;
//=====================================================
//=====================================================
// running CAPTURES preallocated
CaptureImagePtr image;
Vector6s data(Vector6s::Zero()); // will integrate this data repeatedly
CaptureMotionPtr cap_odo = std::make_shared<CaptureMotion>("IMAGE", t, sensor_odom, data, 7, 6, nullptr);
//=====================================================
//=====================================================
// Ceres wrapper
ceres::Solver::Options ceres_options;
// ceres_options.minimizer_type = ceres::TRUST_REGION; //ceres::TRUST_REGION;LINE_SEARCH
// ceres_options.max_line_search_step_contraction = 1e-3;
// ceres_options.minimizer_progress_to_stdout = false;
// ceres_options.line_search_direction_type = ceres::LBFGS;
ceres_options.max_num_iterations = 10;
google::InitGoogleLogging(argv[0]);
CeresManager ceres_manager(problem, ceres_options);
//=====================================================
//=====================================================
// graphics
cv::namedWindow("Feature tracker"); // Creates a window for display.
cv::moveWindow("Feature tracker", 0, 0);
cv::startWindowThread();
//=====================================================
//=====================================================
// main loop
unsigned int number_of_KFs = 0;
Scalar dt = 0.04;
for(int frame_count = 0; frame_count<10000; ++frame_count)
{
t += dt;
// Image ---------------------------------------------
frame_buff.add( vision_utils::setFrame(sen_ptr->getImage(), frame_count) );
// Preferred method with factory objects:
image = std::make_shared<CaptureImage>(t, camera, frame_buff.back()->getImage());
WOLF_DEBUG(__LINE__);
/* process */
camera->process(image);
WOLF_DEBUG(__LINE__);
// Odometry --------------------------------------------
cap_odo->setTimeStamp(t);
// previous state
FrameBasePtr prev_key_fr_ptr = problem->getLastKeyFrame();
// Eigen::Vector7s x_prev = problem->getCurrentState();
Eigen::Vector7s x_prev = prev_key_fr_ptr->getState();
// before the previous state
FrameBasePtr prev_prev_key_fr_ptr = nullptr;
Vector7s x_prev_prev;
Vector7s dx;
for (auto f_it = problem->getTrajectory()->getFrameList().rbegin(); f_it != problem->getTrajectory()->getFrameList().rend(); f_it++)
if ((*f_it) == prev_key_fr_ptr)
{
f_it++;
if (f_it != problem->getTrajectory()->getFrameList().rend())
{
prev_prev_key_fr_ptr = (*f_it);
}
break;
}
// compute delta state, and odometry data
if (!prev_prev_key_fr_ptr)
{
// we have just one state --> odometry data is zero
data.setZero();
}
else
{
x_prev_prev = prev_prev_key_fr_ptr->getState();
// local working copies of the state blocks (note: these Eigen constructors copy
// from the raw pointers; an Eigen::Map would be needed to actually alias them)
Eigen::Vector3s p_prev_prev(x_prev_prev.data());
Eigen::Quaternions q_prev_prev(x_prev_prev.data() + 3);
Eigen::Vector3s p_prev(x_prev.data());
Eigen::Quaternions q_prev(x_prev.data() + 3);
Eigen::Vector3s dp(dx.data());
Eigen::Quaternions dq(dx.data() + 3);
// delta state PQ
dp = q_prev_prev.conjugate() * (p_prev - p_prev_prev);
dq = q_prev_prev.conjugate() * q_prev;
// odometry data
data.head<3>() = dp;
data.tail<3>() = q2v(dq);
}
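// This is the usual local-frame pose delta between consecutive keyframes:
//   dp = q_prev_prev^-1 * (p_prev - p_prev_prev)
//   dq = q_prev_prev^-1 * q_prev
// with q2v() mapping dq to a rotation vector, so data = [dp ; dtheta] is the
// 6-vector expected by the ODOM 3D processor.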
Matrix6s data_cov = computeDataCovariance(data);
cap_odo->setData(data);
cap_odo->setDataCovariance(data_cov);
sensor_odom->process(cap_odo);
// problem->print(2,1,0,0);
// std::cout << "prev prev ts: " << t_prev_prev.get() << "; x: " << x_prev_prev.transpose() << std::endl;
// std::cout << "prev ts: " << t_prev.get() << "; x: " << x_prev.transpose() << std::endl;
// std::cout << "current ts: " << t.get() << std::endl;
// std::cout << " dt: " << t_prev - t_prev_prev << "; dx: " << dx.transpose() << std::endl;
// Cleanup map ---------------------------------------
cleanupMap(problem, t, 2, 5); // dt_max = 2 s, min_factors = 5
// Solve -----------------------------------------------
// solve only when new KFs are added
if (problem->getTrajectory()->getFrameList().size() > number_of_KFs)
{
number_of_KFs = problem->getTrajectory()->getFrameList().size();
std::string summary = ceres_manager.solve(SolverManager::ReportVerbosity::BRIEF);// 0: nothing, 1: BriefReport, 2: FullReport
std::cout << summary << std::endl;
}
// Finish loop -----------------------------------------
cv::Mat image_graphics = frame_buff.back()->getImage().clone();
prc_img_ptr->drawTrackerRoi(image_graphics, cv::Scalar(255.0, 0.0, 255.0)); //tracker roi
prc_img_ptr->drawRoi(image_graphics, prc_img_ptr->detector_roi_, cv::Scalar(0.0,255.0, 255.0)); //active search roi
prc_img_ptr->drawLandmarks(image_graphics);
prc_img_ptr->drawFeaturesFromLandmarks(image_graphics);
cv::imshow("Feature tracker", image_graphics);
cv::waitKey(1);
std::cout << "=================================================================================================" << std::endl;
}
// problem->print(2);
problem.reset();
return 0;
}
This diff is collapsed.
/**
* \file test_simple_AHP.cpp
*
* Created on: Nov 2, 2016
* \author: jtarraso
*/
#include "core/common/wolf.h"
#include "core/frame/frame_base.h"
#include "core/math/pinhole_tools.h"
#include "core/sensor/sensor_camera.h"
#include "core/math/rotations.h"
#include "core/capture/capture_image.h"
#include "core/landmark/landmark_AHP.h"
#include "core/factor/factor_AHP.h"
#include "core/ceres_wrapper/ceres_manager.h"
// Vision utils
#include <vision_utils/vision_utils.h>
/**
* This test simulates the following situation:
* - A kf at the origin, we use it as anchor: kf1
* - A kf at the origin, we use it to project lmks onto the anchor frame: kf2
* - A kf at 1m to the right of the origin, kf3
* - A kf at 1m to the left of the origin, kf4
* - A lmk at 1m distance in front of the anchor: L1
* - we use this lmk to project it onto kf2, kf3 and kf4 and obtain measured pixels p0, p1 and p2
* - A lmk initialized at kf1, with measurement p0, at a distance of 2m: L2
* - we project the pixels p3 and p4: we observe that they do not match p1 and p2
* - we use the measurements p1 and p2 to solve the optimization problem on L2: L2 should converge to L1.
* - This is a sketch of the situation:
* - X, Y are the axes in world frame
* - x, z are the axes in anchor camera frame. We have that X=z, Y=-x.
* - Axes Z and y are perpendicular to the drawing; they have no effect.
*
* X,z
* ^
* |
* L2 * 2
* .|.
* . | .
* . | .
* . | .
* . L1 * 1 .
* . . | . .
* . . | . .
* p4 . . | . . p3
* .. p2 | p1 ..
* Y <--+---------+---------+
* -x +1 0 -1
* kf4 kf1 kf3
* kf2
*
* camera: (uo,vo) = (320,240)
* (au,av) = (320,320)
*
* projection geometry:
*
* 1:1 2:1 1:0 2:1 1:1
* 0 160 320 480 640
* +----+----+----+----+
* |
* | 320
* |
* *
*
* projected pixels:
* p0 = (320,240) // at optical axis or relation 1:0
* p1 = ( 0 ,240) // at 45 deg or relation 1:1
* p2 = (640,240) // at 45 deg or relation 1:1
* p3 = (160,240) // at a relation 2:1
* p4 = (480,240) // at a relation 2:1
*
*/
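/**
 * Sanity check of the pixels above, using u = u0 + au * x/z with (u0,au) = (320,320):
 * a 1:1 relation (|x/z| = 1) gives u = 320 ± 320, i.e. 0 or 640 (p1, p2);
 * a 2:1 relation (|x/z| = 1/2) gives u = 320 ± 160, i.e. 160 or 480 (p3, p4);
 * and y = 0 keeps v = vo = 240 in all cases.
 */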
int main(int argc, char** argv)
{
using namespace wolf;
using namespace Eigen;
/* 1. create 2 kf, fixed
* 2. create 1 sensor
* 3. create 1 lmk1
* 4. project the lmk onto the sensor at kf1
* 5. project the lmk onto the sensor at kf4
* 6. // removing the lmk with lmk_ptr->remove() is not needed
* 7. create new kfs
* 8. create captures
* 9. create features with the measurements from 4 and 5
* 10. create lmk2 from kf3
* 11. create a factor from kf4 to lmk2, anchored at kf3
* 12. solve
* 13. lmk1 == lmk2 ?
*/
// ============================================================================================================
/* 1 */
ProblemPtr problem = Problem::create("PO", 3);
// One anchor frame to define the lmk, and a copy to make a factor
FrameBasePtr kf_1 = problem->emplaceFrame(KEY,(Vector7s()<<0,0,0,0,0,0,1).finished(), TimeStamp(0));
FrameBasePtr kf_2 = problem->emplaceFrame(KEY,(Vector7s()<<0,0,0,0,0,0,1).finished(), TimeStamp(0));
// and two other frames to observe the lmk
FrameBasePtr kf_3 = problem->emplaceFrame(KEY,(Vector7s()<<0,-1,0,0,0,0,1).finished(), TimeStamp(0));
FrameBasePtr kf_4 = problem->emplaceFrame(KEY,(Vector7s()<<0,+1,0,0,0,0,1).finished(), TimeStamp(0));
kf_1->fix();
kf_2->fix();
kf_3->fix();
kf_4->fix();
// ============================================================================================================
// ============================================================================================================
/* 2 */
std::string wolf_root = _WOLF_ROOT_DIR;
SensorBasePtr sensor_ptr = problem->installSensor("CAMERA", "PinHole", (Vector7s()<<0,0,0,-0.5,0.5,-0.5,0.5).finished(), wolf_root + "/src/examples/camera_params_ueye_sim.yaml");
SensorCameraPtr camera = std::static_pointer_cast<SensorCamera>(sensor_ptr);
// ============================================================================================================
// ============================================================================================================
/* 3 */
Eigen::Vector3s lmk_dir = {0,0,1}; // in the optical axis of the anchor camera at kf1
std::cout << std::endl << "lmk: " << lmk_dir.transpose() << std::endl;
lmk_dir.normalize();
Eigen::Vector4s lmk_hmg_c;
Scalar distance = 1.0; // from anchor at kf1
lmk_hmg_c = {lmk_dir(0),lmk_dir(1),lmk_dir(2),(1/distance)};
// std::cout << "lmk hmg in C frame: " << lmk_hmg_c.transpose() << std::endl;
// ============================================================================================================
// Captures------------------
cv::Mat cv_image;
cv_image = cv::Mat::zeros(2, 2, CV_8UC1); // cv::Mat::zeros() is static and returns an expression that must be assigned
CaptureImagePtr image_0 = std::make_shared<CaptureImage>(TimeStamp(0), camera, cv_image);
CaptureImagePtr image_1 = std::make_shared<CaptureImage>(TimeStamp(1), camera, cv_image);
CaptureImagePtr image_2 = std::make_shared<CaptureImage>(TimeStamp(2), camera, cv_image);
kf_2->addCapture(image_0);
kf_3->addCapture(image_1);
kf_4->addCapture(image_2);
// Features-----------------
cv::Mat desc;
cv::KeyPoint kp_0;
FeaturePointImagePtr feat_0 = std::make_shared<FeaturePointImage>(kp_0, 0, desc, Eigen::Matrix2s::Identity());
image_0->addFeature(feat_0);
cv::KeyPoint kp_1;
FeaturePointImagePtr feat_1 = std::make_shared<FeaturePointImage>(kp_1, 0, desc, Eigen::Matrix2s::Identity());
image_1->addFeature(feat_1);
cv::KeyPoint kp_2;
FeaturePointImagePtr feat_2 = std::make_shared<FeaturePointImage>(kp_2, 0, desc, Eigen::Matrix2s::Identity());
image_2->addFeature(feat_2);
// Landmark--------------------
LandmarkAHPPtr lmk_1 = std::make_shared<LandmarkAHP>(lmk_hmg_c, kf_1, camera, desc);
problem->addLandmark(lmk_1);
lmk_1->fix();
std::cout << "Landmark 1: " << lmk_1->point().transpose() << std::endl;
// Factors------------------
FactorAHPPtr fac_0 = FactorAHP::create(feat_0, lmk_1, nullptr);
feat_0->addFactor(fac_0);
FactorAHPPtr fac_1 = FactorAHP::create(feat_1, lmk_1, nullptr);
feat_1->addFactor(fac_1);
FactorAHPPtr fac_2 = FactorAHP::create(feat_2, lmk_1, nullptr);
feat_2->addFactor(fac_2);
// Projections----------------------------
Eigen::VectorXs pix_0 = fac_0->expectation();
kp_0 = cv::KeyPoint(pix_0(0), pix_0(1), 0);
feat_0->setKeypoint(kp_0);
Eigen::VectorXs pix_1 = fac_1->expectation();
kp_1 = cv::KeyPoint(pix_1(0), pix_1(1), 0);
feat_1->setKeypoint(kp_1);
Eigen::VectorXs pix_2 = fac_2->expectation();
kp_2 = cv::KeyPoint(pix_2(0), pix_2(1), 0);
feat_2->setKeypoint(kp_2);
std::cout << "pixel 0: " << pix_0.transpose() << std::endl;
std::cout << "pixel 1: " << pix_1.transpose() << std::endl;
std::cout << "pixel 2: " << pix_2.transpose() << std::endl;
//
//======== up to here the initial projections ==============
std::cout << "\n";
//======== now we want to estimate a new lmk ===============
//
// Features -----------------
FeaturePointImagePtr feat_3 = std::make_shared<FeaturePointImage>(kp_1, 0, desc, Eigen::Matrix2s::Identity());
image_1->addFeature(feat_3);
FeaturePointImagePtr feat_4 = std::make_shared<FeaturePointImage>(kp_2, 0, desc, Eigen::Matrix2s::Identity());
image_2->addFeature(feat_4);
// New landmark with measured pixels from kf2 (anchor) kf3 and kf4 (measurements)
Scalar unknown_distance = 2; // the real distance is 1
Matrix3s K = camera->getIntrinsicMatrix();
Vector3s pix_0_hmg;
pix_0_hmg << pix_0, 1;
Eigen::Vector3s dir_0 = K.inverse() * pix_0_hmg;
Eigen::Vector4s pnt_hmg_0;
pnt_hmg_0 << dir_0, 1/unknown_distance;
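// K.inverse() back-projects the measured pixel into a ray direction in the camera
// frame; appending the inverse distance 1/unknown_distance gives an AHP point
// initialized 2 m along that ray, which the solver should pull back to the true
// 1 m of lmk_1.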
LandmarkAHPPtr lmk_2( std::make_shared<LandmarkAHP>(pnt_hmg_0, kf_2, camera, desc) );
problem->addLandmark(lmk_2);
std::cout << "Landmark 2: " << lmk_2->point().transpose() << std::endl;
// New factors from kf3 and kf4
FactorAHPPtr fac_3 = FactorAHP::create(feat_3, lmk_2, nullptr);
feat_3->addFactor(fac_3);
FactorAHPPtr fac_4 = FactorAHP::create(feat_4, lmk_2, nullptr);
feat_4->addFactor(fac_4);
Eigen::Vector2s pix_3 = fac_3->expectation();
Eigen::Vector2s pix_4 = fac_4->expectation();
std::cout << "pix 3: " << pix_3.transpose() << std::endl;
std::cout << "pix 4: " << pix_4.transpose() << std::endl;
// Wolf tree status ----------------------
problem->print(3);
// problem->check();
// ========== solve ==================================================================================================
/* 12 */
// Ceres wrapper
ceres::Solver::Options ceres_options;
ceres_options.minimizer_type = ceres::TRUST_REGION; //ceres::TRUST_REGION;LINE_SEARCH
ceres_options.max_line_search_step_contraction = 1e-3;
// ceres_options.minimizer_progress_to_stdout = false;
// ceres_options.line_search_direction_type = ceres::LBFGS;
// ceres_options.max_num_iterations = 100;
google::InitGoogleLogging(argv[0]);
CeresManager ceres_manager(problem, ceres_options);
std::string summary = ceres_manager.solve(SolverManager::ReportVerbosity::FULL);// 0: nothing, 1: BriefReport, 2: FullReport
std::cout << summary << std::endl;
// Test of convergence over the lmk params
bool pass = (lmk_2->point() - lmk_1->point()).isMuchSmallerThan(1,Constants::EPS);
std::cout << "Landmark 2 below should have converged to Landmark 1:" << std::endl;
std::cout << "Landmark 1: " << lmk_1->point().transpose() << std::endl;
std::cout << "Landmark 2: " << lmk_2->point().transpose() << std::endl;
std::cout << "Landmark convergence test " << (pass ? "PASSED" : "FAILED") << std::endl;
std::cout << std::endl;
if (!pass)
return -1;
return 0;
}
//std includes
#include <iostream>
// Vision utils
#include <vision_utils.h>
#include <sensors.h>
#include <common_class/buffer.h>
#include <common_class/frame.h>
#include <detectors/orb/detector_orb.h>
#include <descriptors/orb/descriptor_orb.h>
#include <matchers/bruteforce_hamming_2/matcher_bruteforce_hamming_2.h>
//Wolf
#include "core/processor/processor_tracker_landmark_image.h"
int main(int argc, char** argv)
{
using namespace wolf;
std::cout << std::endl << "==================== tracker ORB test ======================" << std::endl;
//=====================================================
// Environment variable for configuration files
std::string wolf_root = _WOLF_ROOT_DIR;
//=====================================================
//=====================================================
// Sensor or sensor recording
vision_utils::SensorCameraPtr sen_ptr = vision_utils::askUserSource(argc, argv);
if (sen_ptr==NULL)
return 0;
// Detector
vision_utils::DetectorParamsORBPtr params_det = std::make_shared<vision_utils::DetectorParamsORB>();
params_det->nfeatures = 500; // The maximum number of features to retain.
params_det->scaleFactor = 2; // Pyramid decimation ratio, greater than 1. scaleFactor==2 means the classical pyramid, where each next level has 4x less pixels than the previous, but such a big scale factor will degrade feature matching scores dramatically. On the other hand, too close to 1 scale factor will mean that to cover certain scale range you will need more pyramid levels and so the speed will suffer.
params_det->nlevels = 8; // The number of pyramid levels. The smallest level will have linear size equal to input_image_linear_size/pow(scaleFactor, nlevels).
params_det->edgeThreshold = 16; // This is size of the border where the features are not detected. It should roughly match the patchSize parameter.
params_det->firstLevel = 0; // It should be 0 in the current implementation.
params_det->WTA_K = 2; // The number of points that produce each element of the oriented BRIEF descriptor. The default value 2 means the BRIEF where we take a random point pair and compare their brightnesses, so we get 0/1 response. Other possible values are 3 and 4. For example, 3 means that we take 3 random points (of course, those point coordinates are random, but they are generated from the pre-defined seed, so each element of BRIEF descriptor is computed deterministically from the pixel rectangle), find point of maximum brightness and output index of the winner (0, 1 or 2). Such output will occupy 2 bits, and therefore it will need a special variant of Hamming distance, denoted as NORM_HAMMING2 (2 bits per bin). When WTA_K=4, we take 4 random points to compute each bin (that will also occupy 2 bits with possible values 0, 1, 2 or 3).
params_det->scoreType = cv::ORB::HARRIS_SCORE; //#enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 };
params_det->patchSize = 31;
vision_utils::DetectorBasePtr det_b_ptr = vision_utils::setupDetector("ORB", "ORB detector", params_det);
vision_utils::DetectorORBPtr det_ptr = std::static_pointer_cast<vision_utils::DetectorORB>(det_b_ptr);
// Descriptor
vision_utils::DescriptorParamsORBPtr params_des = std::make_shared<vision_utils::DescriptorParamsORB>();
params_des->nfeatures = 500; // The maximum number of features to retain.
params_des->scaleFactor = 2; // Pyramid decimation ratio, greater than 1. scaleFactor==2 means the classical pyramid, where each next level has 4x less pixels than the previous, but such a big scale factor will degrade feature matching scores dramatically. On the other hand, too close to 1 scale factor will mean that to cover certain scale range you will need more pyramid levels and so the speed will suffer.
params_des->nlevels = 8; // The number of pyramid levels. The smallest level will have linear size equal to input_image_linear_size/pow(scaleFactor, nlevels).
params_des->edgeThreshold = 16; // This is size of the border where the features are not detected. It should roughly match the patchSize parameter.
params_des->firstLevel = 0; // It should be 0 in the current implementation.
params_des->WTA_K = 2; // The number of points that produce each element of the oriented BRIEF descriptor. The default value 2 means the BRIEF where we take a random point pair and compare their brightnesses, so we get 0/1 response. Other possible values are 3 and 4. For example, 3 means that we take 3 random points (of course, those point coordinates are random, but they are generated from the pre-defined seed, so each element of BRIEF descriptor is computed deterministically from the pixel rectangle), find point of maximum brightness and output index of the winner (0, 1 or 2). Such output will occupy 2 bits, and therefore it will need a special variant of Hamming distance, denoted as NORM_HAMMING2 (2 bits per bin). When WTA_K=4, we take 4 random points to compute each bin (that will also occupy 2 bits with possible values 0, 1, 2 or 3).
params_des->scoreType = cv::ORB::HARRIS_SCORE; //#enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 };
params_des->patchSize = 31;
vision_utils::DescriptorBasePtr des_b_ptr = vision_utils::setupDescriptor("ORB", "ORB descriptor", params_des);
vision_utils::DescriptorORBPtr des_ptr = std::static_pointer_cast<vision_utils::DescriptorORB>(des_b_ptr);
// Matcher
vision_utils::MatcherParamsBRUTEFORCE_HAMMING_2Ptr params_mat = std::make_shared<vision_utils::MatcherParamsBRUTEFORCE_HAMMING_2>();
vision_utils::MatcherBasePtr mat_b_ptr = vision_utils::setupMatcher("BRUTEFORCE_HAMMING_2", "BRUTEFORCE_HAMMING_2 matcher", params_mat);
vision_utils::MatcherBRUTEFORCE_HAMMING_2Ptr mat_ptr = std::static_pointer_cast<vision_utils::MatcherBRUTEFORCE_HAMMING_2>(mat_b_ptr);
//=====================================================
unsigned int buffer_size = 8;
vision_utils::Buffer<vision_utils::FramePtr> frame_buff(buffer_size);
frame_buff.add( vision_utils::setFrame(sen_ptr->getImage(), 0) );
unsigned int img_width = frame_buff.back()->getImage().cols;
unsigned int img_height = frame_buff.back()->getImage().rows;
std::cout << "Image size: " << img_width << "x" << img_height << std::endl;
cv::namedWindow("Feature tracker"); // Creates a window for display.
cv::moveWindow("Feature tracker", 0, 0);
cv::startWindowThread();
cv::imshow("Feature tracker", frame_buff.back()->getImage());
cv::waitKey(1);
KeyPointVector target_keypoints;
KeyPointVector tracked_keypoints_;
KeyPointVector tracked_keypoints_2;
KeyPointVector current_keypoints;
cv::Mat target_descriptors;
cv::Mat tracked_descriptors;
cv::Mat tracked_descriptors2;
cv::Mat current_descriptors;
cv::Mat image_original = frame_buff.back()->getImage();
cv::Mat image_graphics;
unsigned int roi_width = 200;
unsigned int roi_height = 200;
int n_first_1 = 0;
int n_second_1 = 0;
// Initial detection
target_keypoints = det_ptr->detect(image_original);
target_descriptors = des_ptr->getDescriptor(image_original, target_keypoints);
for (unsigned int f_num=0; f_num < 1000; ++f_num)
{
frame_buff.add( vision_utils::setFrame(sen_ptr->getImage(), f_num) );
KeyPointVector keypoints;
cv::Mat descriptors;
DMatchVector cv_matches;
cv::Mat image = frame_buff.back()->getImage();
image_graphics = image.clone();
bool matched = false;
n_first_1 = n_second_1 = 0;
unsigned int tracked_keypoints = 0;
for(unsigned int target_idx = 0; target_idx < target_keypoints.size(); target_idx++)
{
std::cout << "\npixel: " << target_keypoints[target_idx].pt << std::endl;
std::cout << "target_descriptor[" << target_idx << "]:\n" << target_descriptors.row(target_idx) << std::endl;
matched = false;
cv::Rect roi = vision_utils::setRoi(target_keypoints[target_idx].pt.x, target_keypoints[target_idx].pt.y, roi_width, roi_height);
cv::Point2f roi_up_left_corner;
roi_up_left_corner.x = roi.x;
roi_up_left_corner.y = roi.y;
for(unsigned int fr = 0; fr < 2; fr++)
{
keypoints = det_ptr->detect(image, roi);
descriptors = des_ptr->getDescriptor(image, keypoints);
cv::Mat target_descriptor; //B(cv::Rect(0,0,vec_length,1));
target_descriptor = target_descriptors(cv::Rect(0,target_idx,target_descriptors.cols,1));
if(keypoints.size() != 0)
{
mat_ptr->match(target_descriptor, descriptors, des_ptr->getSize(), cv_matches);
Scalar normalized_score = 1 - (Scalar)(cv_matches[0].distance)/(des_ptr->getSize()*8);
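// des_ptr->getSize() is the descriptor length in bytes (32 for ORB), so
// getSize()*8 = 256 is the maximum Hamming distance: a perfect match scores 1.0,
// and the 0.8 threshold below tolerates up to ~51 differing bits.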
std::cout << "pixel: " << keypoints[cv_matches[0].trainIdx].pt + roi_up_left_corner << std::endl;
std::cout << "normalized score: " << normalized_score << std::endl;
if(normalized_score < 0.8)
{
std::cout << "not tracked" << std::endl;
}
else
{
std::cout << "tracked" << std::endl;
matched = true;
cv::Point2f point,t_point;
point.x = keypoints[cv_matches[0].trainIdx].pt.x;
point.y = keypoints[cv_matches[0].trainIdx].pt.y;
t_point.x = target_keypoints[target_idx].pt.x;
t_point.y = target_keypoints[target_idx].pt.y;
cv::circle(image_graphics, t_point, 4, cv::Scalar(51.0, 51.0, 255.0), -1, 3, 0);
cv::circle(image_graphics, point, 2, cv::Scalar(255.0, 255.0, 0.0), -1, 8, 0);
cv::putText(image_graphics, std::to_string(target_idx), point, cv:: FONT_HERSHEY_SIMPLEX, 0.4, cv::Scalar(255.0, 255.0, 0.0));
//introduce in list - tracked point
cv::KeyPoint tracked_kp = keypoints[cv_matches[0].trainIdx];
tracked_kp.pt.x = tracked_kp.pt.x + roi.x;
tracked_kp.pt.y = tracked_kp.pt.y + roi.y;
if(fr==0)
tracked_keypoints_.push_back(tracked_kp);
else
tracked_keypoints_2.push_back(tracked_kp);
cv::Mat tracked_desc;
tracked_desc = descriptors(cv::Rect(0,cv_matches[0].trainIdx,target_descriptors.cols,1));
if(fr==0)
tracked_descriptors.push_back(tracked_desc);
else
tracked_descriptors2.push_back(tracked_desc);
//introduce in list - target point
if(fr==0)
{
current_keypoints.push_back(target_keypoints[target_idx]);
current_descriptors.push_back(target_descriptor);
}
if (fr == 0 && normalized_score == 1)n_first_1++;
if (fr == 1 && normalized_score == 1)n_second_1++;
}
}
else
std::cout << "not tracked" << std::endl;
}
if (matched) tracked_keypoints++;
}
std::cout << "\ntracked keypoints: " << tracked_keypoints << "/" << target_keypoints.size() << std::endl;
std::cout << "percentage first: " << ((float)((float)tracked_keypoints/(float)target_keypoints.size()))*100 << "%" << std::endl;
std::cout << "Number of perfect first matches: " << n_first_1 << std::endl;
std::cout << "Number of perfect second matches: " << n_second_1 << std::endl;
if(tracked_keypoints == 0)
{
target_keypoints = det_ptr->detect(image);
target_descriptors = des_ptr->getDescriptor(image, target_keypoints);
std::cout << "number of new keypoints to be tracked: " << target_keypoints.size() << std::endl;
}
else
{
std::cout << "\n\nADVANCE" << std::endl;
// for(unsigned int i = 0; i < target_keypoints.size(); i++)
// {
// std::cout << "\ntarget keypoints";
// std::cout << target_keypoints[i].pt;
// }
// for(unsigned int i = 0; i < current_keypoints.size(); i++)
// {
// std::cout << "\ncurrent keypoints";
// std::cout << current_keypoints[i].pt;
// }
// for( int i = 0; i < target_descriptors.rows; i++)
// {
// std::cout << "\ntarget descriptors";
// std::cout << target_descriptors.row(i);
// }
// for( int i = 0; i < current_descriptors.rows; i++)
// {
// std::cout << "\ncurrent descriptors";
// std::cout << current_descriptors.row(i);
// }
// for( int i = 0; i < current_descriptors2.rows; i++)
// {
// std::cout << "\ncurrent descriptors2";
// std::cout << current_descriptors2.row(i);
// }
//target_keypoints.clear();
target_keypoints = current_keypoints;
current_descriptors.copyTo(target_descriptors);
current_keypoints.clear();
current_descriptors.release();
std::cout << "\nAFTER THE ADVANCE" << std::endl;
// for(unsigned int i = 0; i < target_keypoints.size(); i++)
// {
// std::cout << "target keypoints";
// std::cout << target_keypoints[i].pt << "\t" ;
// }
// for( int i = 0; i < target_descriptors.rows; i++)
// {
// std::cout << "\ntarget descriptors";
// std::cout << target_descriptors.row(i);
// }
std::cout << "\nEND OF ADVANCE\n";
}
tracked_keypoints = 0;
cv::imshow("Feature tracker", image_graphics);
cv::waitKey(1);
}
}
This diff is collapsed.