diff --git a/include/vision/factor/factor_pixel_hp.h b/include/vision/factor/factor_pixel_hp.h
index 832602ec2d568abbd21f4f2ea17d34181834712a..0edfcb65ef32d02ccf74b0e7ccd46ac0c233ea00 100644
--- a/include/vision/factor/factor_pixel_hp.h
+++ b/include/vision/factor/factor_pixel_hp.h
@@ -130,45 +130,35 @@ inline void FactorPixelHp::expectation(const T* const _frame_p,
 {
     using namespace Eigen;
 
-    // All involved transforms typedef
-    typedef Eigen::Transform<T, 3, Eigen::Isometry> TransformType;
-
-    // world to current robot transform
-    Map<const Matrix<T, 3, 1> > p_w_r(_frame_p);
-    Translation<T, 3> t_w_r(p_w_r);
-    Map<const Quaternion<T> > q_w_r(_frame_o);
-    TransformType T_w_r = t_w_r * q_w_r;
-
-    // current robot to current camera transform
-    Map<const Matrix<T, 3, 1> > p_r_c(_sensor_p);
-    Translation<T, 3> t_r_c(p_r_c);
-    Map<const Quaternion<T> > q_r_c(_sensor_o);
-    TransformType T_r_c = t_r_c * q_r_c;
-
-    // hmg point in current camera frame C
-    Eigen::Map<const Eigen::Matrix<T, 4, 1> > landmark_hmg(_lmk_hmg);
-    Eigen::Matrix<T, 4, 1> landmark_hmg_c = (T_w_r * T_r_c).inverse(Eigen::Isometry)
-                                          * landmark_hmg;
-
-    //std::cout << "p_w_r = \n\t" << _frame_p[0] << "\n\t" << _frame_p[1] << "\n\t" << _frame_p[2] << "\n";
-//    std::cout << "q_w_r = \n\t" << _frame_o[0] << "\n\t" << _frame_o[1] << "\n\t" << _frame_o[2] << "\n\t" << _frame_o[3] << "\n";
-//    std::cout << "p_r_c = \n\t" << _sensor_p[0] << "\n\t" << _sensor_p[1] << "\n\t" << _sensor_p[2] << "\n";
-//    std::cout << "q_r_c = \n\t" << _sensor_o[0] << "\n\t" << _sensor_o[1] << "\n\t" << _sensor_o[2] << "\n\t" << _sensor_o[3] << "\n";
-//    std::cout << "landmark_hmg_c = \n\t" << landmark_hmg_c(0) << "\n\t" << landmark_hmg_c(1) << "\n\t" << landmark_hmg_c(2) << "\n\t" << landmark_hmg_c(3) << "\n";
-
-    // lmk direction vector
-    Eigen::Matrix<T, 3, 1> v_dir = landmark_hmg_c.head(3);
-
-    // lmk inverse distance
-    T rho = landmark_hmg_c(3);
-
-    // camera parameters
-    Matrix<T, 4, 1> intrinsic = intrinsic_.cast<T>();
-    Eigen::Matrix<T, Eigen::Dynamic, 1> distortion = distortion_.cast<T>();
+    // frames w: world; r: robot; c: camera
+    Map<const Matrix<T, 3, 1> > p_wr(_frame_p);
+    Map<const Quaternion<T> >   q_wr(_frame_o);
+    Map<const Matrix<T, 3, 1> > p_rc(_sensor_p);
+    Map<const Quaternion<T> >   q_rc(_sensor_o);
+
+    // camera pose in world frame: transforms from camera to world
+    Matrix<T, 3, 1> p_wc = p_wr + q_wr * p_rc;
+    Quaternion<T>   q_wc = q_wr * q_rc;
+
+    // invert transform: from world to camera
+    //   | R T |.inv = | R.tr -R.tr*T | == | q.conj -q.conj*T |
+    //   | 0 1 |       | 0       1    |    | 0          1     |
+    Quaternion<T>   q_cw = q_wc.conjugate();
+    Matrix<T, 3, 1> p_cw = - (q_cw * p_wc);
+
+    // landmark hmg
+    Matrix<T, 4, 1> lh_w(_lmk_hmg);
+
+    // landmark dir vector in C frame
+    /* note: transforming hmg point to get direction vector v':
+     *   | q T | * | v | = | q*v + T*w |   --> v' = q*v + T*w
+     *   | 0 1 |   | w |   |     w     |
+     */
+    Matrix<T, 3, 1> v_dir = q_cw * lh_w.template head<3>() + p_cw * lh_w(3);
 
     // project point and exit
     Eigen::Map<Eigen::Matrix<T, 2, 1> > expectation(_expectation);
-    expectation = pinhole::projectPoint(intrinsic, distortion, v_dir/rho);
+    expectation = pinhole::projectPoint(intrinsic_, distortion_, v_dir);
 }
 
 
@@ -186,7 +176,7 @@ inline bool FactorPixelHp::operator ()(const T* const _frame_p,
 
     // residual
     Eigen::Map<Eigen::Matrix<T, 2, 1> > residuals(_residuals);
-    residuals = getMeasurementSquareRootInformationUpper().cast<T>() * (expected - getMeasurement());
+    residuals = getMeasurementSquareRootInformationUpper() * (expected - getMeasurement());
 
     return true;
 }
diff --git a/include/vision/math/pinhole_tools.h b/include/vision/math/pinhole_tools.h
index 4d55810f014168f07ce1459c09b373324c5bd462..99d66a4e3341d5d353548adb39c9aca73d45bf79 100644
--- a/include/vision/math/pinhole_tools.h
+++ b/include/vision/math/pinhole_tools.h
@@ -272,8 +272,7 @@ Matrix<typename Derived2::Scalar, 2, 1> distortPoint(const MatrixBase<Derived1>
     EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived1);
     MatrixSizeCheck<2,1>::check(up);
-    SizeEigen n = d.size();
-    if (n == 0)
+    if (d.size() == 0)
         return up;
     else
     {
         typename Derived2::Scalar r2 = up(0) * up(0) + up(1) * up(1); // this is the norm squared: r2 = ||u||^2
@@ -429,12 +428,13 @@ Matrix<typename Derived2::Scalar, 2, 1> pixellizePoint(const MatrixBase<Derived1
     MatrixSizeCheck<4,1>::check(k);
     MatrixSizeCheck<2,1>::check(ud);
 
+    typedef typename Derived1::Scalar S;
     typedef typename Derived2::Scalar T;
 
-    T u_0 = k(0);
-    T v_0 = k(1);
-    T a_u = k(2);
-    T a_v = k(3);
+    const S& u_0 = k(0);
+    const S& v_0 = k(1);
+    const S& a_u = k(2);
+    const S& a_v = k(3);
 
     Matrix<T, 2, 1> u;
     u(0) = u_0 + a_u * ud(0);
diff --git a/src/processor/processor_visual_odometry.cpp b/src/processor/processor_visual_odometry.cpp
index 3025c32d3712e33f5cb4bac7d7a26310fc61ddab..5c783343195333401fecdebdfa35f1b8372b6d97 100644
--- a/src/processor/processor_visual_odometry.cpp
+++ b/src/processor/processor_visual_odometry.cpp
@@ -131,14 +131,6 @@ void ProcessorVisualOdometry::preProcess()
     capture_image_incoming_->setTracksOrigin(tracks_init);
     capture_image_incoming_->setTracksPrev(tracks_init);
 
-    // print a bar with the number of active tracks in incoming
-    std::string s;
-    for (int i = 0; i<capture_image_incoming_->getKeyPoints().size(); i++)
-    {
-        s += "#";
-    }
-    WOLF_INFO("TRACKS: ", capture_image_incoming_->getKeyPoints().size(), " ", s);
-
     auto __attribute__((unused)) dt_preprocess = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now() - t1).count();
 
     WOLF_DEBUG( "dt_preprocess (ms): " , dt_preprocess );
@@ -312,15 +304,6 @@ void ProcessorVisualOdometry::preProcess()
     capture_image_incoming_->setTracksOrigin(tracks_origin_incoming);
 
     auto __attribute__((unused)) dt_preprocess = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now() - t1).count();
-
-    // print a bar with the number of active tracks in incoming
-    std::string s;
-    for (int i = 0; i<capture_image_incoming_->getKeyPoints().size(); i++)
-    {
-        s += "#";
-    }
-    WOLF_INFO("TRACKS: ", capture_image_incoming_->getKeyPoints().size(), " ", s);
-
     WOLF_DEBUG( "dt_preprocess (ms): " , dt_preprocess );
 
     return;
@@ -539,6 +522,16 @@ void ProcessorVisualOdometry::postProcess()
         ++track_it;
     }
 
+    // print a bar with the number of active tracks in incoming
+    unsigned int n = capture_image_incoming_->getKeyPoints().size();
+    std::string s(n/2, '#');
+    WOLF_INFO("TRACKS: ", n, " ", s);
+
+    n = getProblem()->getMap()->getLandmarkList().size();
+    s = std::string(n/2, '-');
+    WOLF_INFO("LMARKS: ", n, " ", s);
+
+
 }
 
 bool ProcessorVisualOdometry::voteForKeyFrame() const
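
Note: the following is a minimal standalone sketch, not part of the patch, that cross-checks the quaternion-based world-to-camera inversion introduced in FactorPixelHp::expectation() against the Eigen::Isometry composition used by the removed code. It assumes only Eigen; the pose and landmark values are arbitrary placeholders.

// Sanity-check sketch (not part of the patch): compares the quaternion-based
// world-to-camera inversion used by the new FactorPixelHp::expectation() with
// the Eigen::Isometry composition that the removed code relied on.
// All numeric values below are arbitrary placeholders.
#include <Eigen/Dense>
#include <Eigen/Geometry>
#include <iostream>

int main()
{
    using namespace Eigen;

    // robot pose in world (w_r) and camera pose in robot (r_c)
    Vector3d    p_wr(1.0, 2.0, 0.5);
    Quaterniond q_wr(AngleAxisd(0.3, Vector3d::UnitZ()));
    Vector3d    p_rc(0.1, 0.0, 0.2);
    Quaterniond q_rc(AngleAxisd(-0.1, Vector3d::UnitY()));

    // homogeneous landmark in world frame: [v; w], with w the inverse distance
    Vector4d lh_w(4.0, -1.0, 3.0, 0.25);

    // removed formulation: compose isometries, invert, transform the hmg point
    Isometry3d T_wc = Translation3d(p_wr) * q_wr * Translation3d(p_rc) * q_rc;
    Vector4d   lh_c = T_wc.inverse(Isometry).matrix() * lh_w;
    Vector3d v_dir_old = lh_c.head<3>();

    // new formulation: invert directly with quaternions
    Vector3d    p_wc = p_wr + q_wr * p_rc;
    Quaterniond q_wc = q_wr * q_rc;
    Quaterniond q_cw = q_wc.conjugate();
    Vector3d    p_cw = -(q_cw * p_wc);
    Vector3d v_dir_new = q_cw * lh_w.head<3>() + p_cw * lh_w(3);

    // both direction vectors should agree up to numerical precision
    std::cout << "difference: " << (v_dir_old - v_dir_new).norm() << std::endl;
    return 0;
}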