diff --git a/src/publisher_vision.cpp b/src/publisher_vision.cpp
index 4a3e5cb1fbd65a3f05746dc4338f875bdf953e75..578301ba4561eb079d7a866c715563c43ae9068d 100644
--- a/src/publisher_vision.cpp
+++ b/src/publisher_vision.cpp
@@ -91,18 +91,20 @@ void PublisherVisionDebug::publishDerived()
 
     // Get capture image
     auto cap_img = std::dynamic_pointer_cast<CaptureImage>(processor_vision_->getLast());
-    auto track_matrix = processor_vision_->getTrackMatrix(); // ajouté
+    assert(cap_img != nullptr && "Received Capture is not of type CaptureImage!");
 
-    assert(cap_img != nullptr);
+    auto track_matrix = processor_vision_->getTrackMatrix(); // copy track matrix
 
-    // Extract cv image
+    // Draw and publish debug image
     try
     {
+        // Extract cv image
         cv::Mat cv_img_debug;
         cv::cvtColor(cap_img->getImage(), cv_img_debug, cv::COLOR_GRAY2BGR);
 
+        // Draw all tracks
         bool show_track_ids = false;
-        showTracks(cv_img_debug, track_matrix, cap_img, RED, show_track_ids);
+        showTracks(cv_img_debug, track_matrix, cap_img, CYAN, show_track_ids);
 
         // Convert to image msg
         cv_bridge::CvImagePtr cv_msg = boost::make_shared<cv_bridge::CvImage>();
@@ -111,17 +113,7 @@ void PublisherVisionDebug::publishDerived()
         cv_msg->encoding = sensor_msgs::image_encodings::BGR8;
         cv_msg->image = cv_img_debug;
 
-        // cv_bridge::CvImagePtr cv_ptr;
-        // try
-        // {
-        //     cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8); //check if different from the one above
-        // }
-        // catch (cv_bridge::Exception& e)
-        // {
-        //     ROS_ERROR("cv_bridge exception: %s", e.what());
-        //     return;
-        // }
-
+        // convert to proper ROS type and publish
         auto cv_msg_ros = cv_msg->toImageMsg();
         publisher_image_.publish(cv_msg_ros);
 
@@ -244,7 +236,6 @@ void PublisherVisionDebug::showTracks(cv::Mat _image,
                                       COLORFEATURES _color_of_features,
                                       bool _show_track_ID)
 {
-    map<size_t, FeatureBasePtr> alive_features = _track_matrix.snapshot(_cap_img);
 
     int size_feature_kf_pix_ = 1;
     int size_feature_current_pix_ = 2;
@@ -255,8 +246,14 @@ void PublisherVisionDebug::showTracks(cv::Mat _image,
 
     // std::cout << "\n\n\n Number of tracks: " << _tracks.size() << '\n';
     // for (auto track = _tracks.begin(); track != _tracks.end(); ++track) //la map trackS
+
+    map<size_t, FeatureBasePtr> alive_features = _track_matrix.snapshot(_cap_img);
+
+    TimeStamp time_capture = _cap_img->getTimeStamp();
+
     for (auto alive_feat : alive_features)
     {
+        // get full track of the feature
        Track track = _track_matrix.track(alive_feat.second->trackId());
        if (track.size() == 0)
        {
@@ -264,15 +261,41 @@ void PublisherVisionDebug::showTracks(cv::Mat _image,
             continue;
         }
 
-        // std::cout << track.size() << '\n';
-        //track is a map <featureId, feature>
+        // determine color and luminosity of the track
         std::vector<int> color = colorTrackAndFeatures(track.size(),
                                                        100,
                                                        450,
                                                        max_feature_in_track,
                                                        min_feature_in_track,
                                                        _color_of_features);
 
-        for (auto ftr = std::next(track.rbegin()); ftr != track.rend(); ++ftr) //la map track
+        // iterator to the current feature in the track
+        Track::iterator ftr_current(track.find(time_capture));
+
+        // draw the current feature
+        int thickness_curr = 1; // outline thickness; use -1 (cv::FILLED) for a filled circle
+        cv::circle(_image,
+                   cv::Point(ftr_current->second->getMeasurement(0),
+                             ftr_current->second->getMeasurement(1)),
+                   size_feature_current_pix_,
+                   CV_RGB(color[0], color[1], color[2]),
+                   thickness_curr);
+
+        // draw the track ID next to the current feature
+        if (_show_track_ID) // show feature IDs of the current frame
+        {
+            int shift_text_x = 7;
+            int shift_text_y = 10;
+            cv::putText(
+                _image,
+                std::to_string(ftr_current->second->trackId()),
+                cv::Point(ftr_current->second->getMeasurement(0) - shift_text_x,
+                          ftr_current->second->getMeasurement(1) - shift_text_y),
+                1, 0.5, CV_RGB(color[0], color[1], color[2]));
+        }
+
+
+        // iterate the track backwards, from the current capture's feature to the beginning of the track
+        for (Track::reverse_iterator ftr(ftr_current); ftr != track.rend(); ++ftr)
         {
             auto previous = std::prev(ftr); //previous iterator
@@ -281,40 +304,17 @@ void PublisherVisionDebug::showTracks(cv::Mat _image,
                 && ftr->second->getCapture()->getFrame()->getProblem();
             if (is_keyframe)
             {
-                //features of previous keyframes
+                // mark features of previous keyframes
                 cv::circle(_image,
                            cv::Point(ftr->second->getMeasurement(0), ftr->second->getMeasurement(1)),
                            size_feature_kf_pix_, CV_RGB(color[0], color[1], color[2]), 0.1);
             }
 
-            // line in between keyframes (as well as last KF and current frame)
+            // draw line in between keyframes (as well as last KF and current frame)
             cv::line(_image,
                      cv::Point(ftr->second->getMeasurement(0), ftr->second->getMeasurement(1)),
                      cv::Point(previous->second->getMeasurement(0), previous->second->getMeasurement(1)),
                      CV_RGB(color[0], color[1], color[2]), 1.5);
         }
-
-        // features of current frame
-        // std::cout << "TO" << '\n';
-        // std::cout << track.size() << '\n';
-        // bool toto = track.rbegin() == track.rend();
-        // std::cout << toto << '\n';
-        // std::cout << "track.rbegin()->second->getMeasurement(0) " << track.rbegin()->second->getMeasurement(0) << '\n';
-
-        int thickness_curr = 0.1; // -1 -> filled empty
-        cv::circle(_image,
-                   cv::Point(track.rbegin()->second->getMeasurement(0), track.rbegin()->second->getMeasurement(1)),
-                   size_feature_current_pix_, CV_RGB(color[0], color[1], color[2]), thickness_curr);
-
-        if (_show_track_ID) //show features ID of current frame
-        {
-            int shift_text_x = 7;
-            int shift_text_y = 10;
-            cv::putText(
-                _image,
-                std::to_string(track.rbegin()->second->trackId()),
-                cv::Point(track.rbegin()->second->getMeasurement(0) - shift_text_x,
-                          track.rbegin()->second->getMeasurement(1) - shift_text_y),
-                1, 0.5, CV_RGB(color[0], color[1], color[2]));
-        }
     }
 }
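For reference on the new drawing loop: it constructs a reverse iterator from the current capture's feature and walks towards older timestamps, which is easy to misread because std::prev() on a reverse iterator points at the *newer* neighbour. Below is a minimal standalone sketch of the same pattern, assuming a simplified std::map<double, cv::Point> as a stand-in for the WOLF Track type; the end() guard is an extra assumption added here for illustration only, since the patch itself does not guard that case (presumably because the snapshot of the capture only yields features present at the capture's timestamp).

```cpp
// Self-contained sketch, not part of the patch: same backwards walk over a track,
// with a plain std::map standing in for the WOLF Track type.
#include <iterator>
#include <map>
#include <opencv2/imgproc.hpp>

using MiniTrack = std::map<double, cv::Point>;  // hypothetical stand-in for Track

void drawTrackBackwards(cv::Mat& image, const MiniTrack& track, double t_current)
{
    auto it_current = track.find(t_current);
    if (it_current == track.end())  // guard added for illustration; the patch relies on
        return;                     // the feature existing at the capture's timestamp

    // current feature: small cyan circle (colors in BGR order)
    cv::circle(image, it_current->second, 2, cv::Scalar(255, 255, 0), 1);

    // reverse_iterator(it_current) points to the element *before* it_current,
    // so the loop visits strictly older features until the start of the track
    for (MiniTrack::const_reverse_iterator ftr(it_current); ftr != track.rend(); ++ftr)
    {
        auto newer = std::prev(ftr);  // one step closer to the current capture
        cv::line(image, ftr->second, newer->second, cv::Scalar(255, 255, 0), 1);
    }
}
```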