OpenVINS Study Week 2
Introduction
Topics that came up in last week's study session:
FEJ ↔ marginalization
Sliding window and marginalized landmarks
What is the Jacobian computation here for? (what gets discarded vs. what is kept and used)
The difference between UpdateMSCKF() and UpdateSLAM()
Contents
Code Analysis
run_subscribe_msckf.cpp
// Create our VIO system
VioManagerOptions params;
params.print_and_load(parser);
params.use_multi_threading_subs = true;
sys = std::make_shared<VioManager>(params);

#if ROS_AVAILABLE == 1
viz = std::make_shared<ROS1Visualizer>(nh, sys);
viz->setup_subscribers(parser);
ROS1Visualizer
ROS1Visualizer handles everything ROS-related. It holds the VioManager instance in its _app member and uses it to access the State and whatever other functions it needs.
void ROS1Visualizer::setup_subscribers(std::shared_ptr<ov_core::YamlParser> parser) {
  // We need a valid parser
  assert(parser != nullptr);

  // Create imu subscriber (handle legacy ros param info)
  std::string topic_imu;
  _nh->param<std::string>("topic_imu", topic_imu, "/imu0");
  parser->parse_external("relative_config_imu", "imu0", "rostopic", topic_imu);
  sub_imu = _nh->subscribe(topic_imu, 1000, &ROS1Visualizer::callback_inertial, this);

  // Logic for sync stereo subscriber
  // https://answers.ros.org/question/96346/subscribe-to-two-image_raws-with-one-function/?answer=96491#post-id-96491
  if (_app->get_params().state_options.num_cameras == 2) {
    // Read in the topics
    std::string cam_topic0, cam_topic1;
    _nh->param<std::string>("topic_camera" + std::to_string(0), cam_topic0, "/cam" + std::to_string(0) + "/image_raw");
    _nh->param<std::string>("topic_camera" + std::to_string(1), cam_topic1, "/cam" + std::to_string(1) + "/image_raw");
    parser->parse_external("relative_config_imucam", "cam" + std::to_string(0), "rostopic", cam_topic0);
    parser->parse_external("relative_config_imucam", "cam" + std::to_string(1), "rostopic", cam_topic1);
    // Create sync filter (they have unique pointers internally, so we have to use move logic here...)
    auto image_sub0 = std::make_shared<message_filters::Subscriber<sensor_msgs::Image>>(*_nh, cam_topic0, 1);
    auto image_sub1 = std::make_shared<message_filters::Subscriber<sensor_msgs::Image>>(*_nh, cam_topic1, 1);
    auto sync = std::make_shared<message_filters::Synchronizer<sync_pol>>(sync_pol(10), *image_sub0, *image_sub1);
    sync->registerCallback(boost::bind(&ROS1Visualizer::callback_stereo, this, _1, _2, 0, 1));
    // Append to our vector of subscribers
    sync_cam.push_back(sync);
    sync_subs_cam.push_back(image_sub0);
    sync_subs_cam.push_back(image_sub1);
  } else {
    // Now we should add any non-stereo callbacks here
    for (int i = 0; i < _app->get_params().state_options.num_cameras; i++) {
      // read in the topic
      std::string cam_topic;
      _nh->param<std::string>("topic_camera" + std::to_string(i), cam_topic, "/cam" + std::to_string(i) + "/image_raw");
      parser->parse_external("relative_config_imucam", "cam" + std::to_string(i), "rostopic", cam_topic);
      // create subscriber
      subs_cam.push_back(_nh->subscribe<sensor_msgs::Image>(cam_topic, 10, boost::bind(&ROS1Visualizer::callback_monocular, this, _1, i)));
    }
  }
}
Here the IMU topic is fed in through callback_inertial, and the camera topics are fed in through callback_stereo and callback_monocular.
void ROS1Visualizer::callback_inertial(const sensor_msgs::Imu::ConstPtr &msg) {
  // convert into correct format
  ov_core::ImuData message;
  message.timestamp = msg->header.stamp.toSec();
  message.wm << msg->angular_velocity.x, msg->angular_velocity.y, msg->angular_velocity.z;
  message.am << msg->linear_acceleration.x, msg->linear_acceleration.y, msg->linear_acceleration.z;

  // send it to our VIO system
  _app->feed_measurement_imu(message);
  visualize_odometry(message.timestamp);

  // If the processing queue is currently active / running just return so we can keep getting measurements
  // Otherwise create a second thread to do our update in an async manor
  // The visualization of the state, images, and features will be synchronous with the update!
  if (thread_update_running)
    return;
  thread_update_running = true;
  std::thread thread([&] {
    // Lock on the queue (prevents new images from appending)
    std::lock_guard<std::mutex> lck(camera_queue_mtx);

    // Count how many unique image streams
    std::map<int, bool> unique_cam_ids;
    for (const auto &cam_msg : camera_queue) {
      unique_cam_ids[cam_msg.sensor_ids.at(0)] = true;
    }

    // If we do not have enough unique cameras then we need to wait
    // We should wait till we have one of each camera to ensure we propagate in the correct order
    auto params = _app->get_params();
    size_t num_unique_cameras = (params.state_options.num_cameras == 2) ? 1 : params.state_options.num_cameras;
    if (unique_cam_ids.size() == num_unique_cameras) {
      // Loop through our queue and see if we are able to process any of our camera measurements
      // We are able to process if we have at least one IMU measurement greater than the camera time
      double timestamp_imu_inC = message.timestamp - _app->get_state()->_calib_dt_CAMtoIMU->value()(0);
      while (!camera_queue.empty() && camera_queue.at(0).timestamp < timestamp_imu_inC) {
        double update_dt = 100.0 * (timestamp_imu_inC - camera_queue.at(0).timestamp);
        _app->feed_measurement_camera(camera_queue.at(0));
        visualize();
        camera_queue.pop_front();
      }
    }
    thread_update_running = false;
  });

  // If we are single threaded, then run single threaded
  // Otherwise detach this thread so it runs in the background!
  if (!_app->get_params().use_multi_threading_subs) {
    thread.join();
  } else {
    thread.detach();
  }
}
What does a "unique camera stream" mean here? The comment says it is needed "to ensure we propagate in the correct order". (For stereo, both images are bundled into a single CameraData entry by callback_stereo, which is presumably why only one unique sensor id is expected when num_cameras == 2.) In any case, once the unique-camera count matches, _app->feed_measurement_camera(camera_queue.at(0)); is called and then everything needed is drawn with visualize().
VioManager.h
void feed_measurement_camera(const ov_core::CameraData &message) { track_image_and_update(message); }
Once the camera-topic callbacks callback_monocular and callback_stereo have filled in the images, the rest of the processing can proceed. This is also where image drops are handled using the timestamps.
Why is camera_queue sorted? Is it to handle measurements arriving from different cameras?
void ROS1Visualizer::visualize() {
  // Return if we have already visualized
  if (last_visualization_timestamp == _app->get_state()->_timestamp && _app->initialized())
    return;
  last_visualization_timestamp = _app->get_state()->_timestamp;

  // Start timing
  // boost::posix_time::ptime rT0_1, rT0_2;
  // rT0_1 = boost::posix_time::microsec_clock::local_time();

  // publish current image (only if not multi-threaded)
  if (!_app->get_params().use_multi_threading_pubs)
    publish_images();

  // Return if we have not inited
  if (!_app->initialized())
    return;

  // Save the start time of this dataset
  if (!start_time_set) {
    rT1 = boost::posix_time::microsec_clock::local_time();
    start_time_set = true;
  }

  // publish state
  publish_state();

  // publish points
  publish_features();

  // Publish gt if we have it
  publish_groundtruth();

  // Publish keyframe information
  publish_loopclosure_information();

  // Save total state
  if (save_total_state) {
    ROSVisualizerHelper::sim_save_total_state_to_file(_app->get_state(), _sim, of_state_est, of_state_std, of_state_gt);
  }
}
The functions that wrap the externally received IMU and camera topics into internal structs and hand them to the VIO system follow the _app->feed_<SOMETHING>() pattern shown below.
Conversely, data is read back out in the _app->get_<SOMETHING>() form.
// convert into correct format
ov_core::ImuData message;
message.timestamp = msg->header.stamp.toSec();
message.wm << msg->angular_velocity.x, msg->angular_velocity.y, msg->angular_velocity.z;
message.am << msg->linear_acceleration.x, msg->linear_acceleration.y, msg->linear_acceleration.z;
// send it to our VIO system
_app->feed_measurement_imu(message);

while (!camera_queue.empty() && camera_queue.at(0).timestamp < timestamp_imu_inC) {
  double update_dt = 100.0 * (timestamp_imu_inC - camera_queue.at(0).timestamp);
  _app->feed_measurement_camera(camera_queue.at(0));
VioManager
As seen above, the VioManager constructor creates the other classes needed to run the algorithm.
// Let's make a feature extractor
// NOTE: after we initialize we will increase the total number of feature tracks
// NOTE: we will split the total number of features over all cameras uniformly
int init_max_features = std::floor((double)params.init_options.init_max_features / (double)params.state_options.num_cameras);
if (params.use_klt) {
  trackFEATS = std::shared_ptr<TrackBase>(new TrackKLT(state->_cam_intrinsics_cameras, init_max_features,
                                                       state->_options.max_aruco_features, params.use_stereo, params.histogram_method,
                                                       params.fast_threshold, params.grid_x, params.grid_y, params.min_px_dist));
} else {
  trackFEATS = std::shared_ptr<TrackBase>(new TrackDescriptor(
      state->_cam_intrinsics_cameras, init_max_features, state->_options.max_aruco_features, params.use_stereo, params.histogram_method,
      params.fast_threshold, params.grid_x, params.grid_y, params.min_px_dist, params.knn_ratio));
}

// Initialize our aruco tag extractor
if (params.use_aruco) {
  trackARUCO = std::shared_ptr<TrackBase>(new TrackAruco(state->_cam_intrinsics_cameras, state->_options.max_aruco_features,
                                                         params.use_stereo, params.histogram_method, params.downsize_aruco));
}

// Initialize our state propagator
propagator = std::make_shared<Propagator>(params.imu_noises, params.gravity_mag);

// Our state initialize
initializer = std::make_shared<ov_init::InertialInitializer>(params.init_options, trackFEATS->get_feature_database());

// Make the updater!
updaterMSCKF = std::make_shared<UpdaterMSCKF>(params.msckf_options, params.featinit_options);
updaterSLAM = std::make_shared<UpdaterSLAM>(params.slam_options, params.aruco_options, params.featinit_options);

// If we are using zero velocity updates, then create the updater
if (params.try_zupt) {
  updaterZUPT = std::make_shared<UpdaterZeroVelocity>(params.zupt_options, params.imu_noises, trackFEATS->get_feature_database(),
                                                      propagator, params.gravity_mag, params.zupt_max_velocity,
                                                      params.zupt_noise_multiplier, params.zupt_max_disparity);
}
As shown above, the feature tracker is selected by the parameter settings: either the KLT-based TrackKLT or the descriptor-based TrackDescriptor. Inside VioManager it is always used through the fixed member name trackFEATS.
if (params.use_klt) {
  trackFEATS = std::shared_ptr<TrackBase>(new TrackKLT(state->_cam_intrinsics_cameras, init_max_features,
                                                       state->_options.max_aruco_features, params.use_stereo, params.histogram_method,
                                                       params.fast_threshold, params.grid_x, params.grid_y, params.min_px_dist));
} else {
  trackFEATS = std::shared_ptr<TrackBase>(new TrackDescriptor(
      state->_cam_intrinsics_cameras, init_max_features, state->_options.max_aruco_features, params.use_stereo, params.histogram_method,
      params.fast_threshold, params.grid_x, params.grid_y, params.min_px_dist, params.knn_ratio));
}
state->_cam_intrinsics_cameras: camera calibration object which has all camera intrinsics in it
init_max_features: number of features we want to track (i.e. track 200 points from frame to frame)
state->_options.max_aruco_features: the max id of the ArUco tags, so we ensure that we start our non-ArUco features above this value
params.use_stereo: if we should do stereo feature tracking or binocular
params.histogram_method: what type of histogram pre-processing should be done (histogram eq?)
params.fast_threshold: FAST detection threshold
params.grid_x, params.grid_y: size of the extraction grid in the x-direction / u-direction and the y-direction / v-direction
params.min_px_dist: features need to be at least this many pixels away from each other
As shown, trackFEATS->get_feature_database() is already used when pulling features during initialization.
Functions in VioManager that use trackFEATS:
track_image_and_update(const ov_core::CameraData &message_const)
trackFEATS->feed_new_camera(message);
do_feature_propagate_update(const ov_core::CameraData &message)
feats_lost = trackFEATS->get_feature_database()->features_not_containing_newer(state->_timestamp, false, true);
feats_marg = trackFEATS->get_feature_database()->features_containing(state->margtimestep(), false, true);
The features to be used for each purpose are pulled from the database:
std::shared_ptr<Feature> feat2 = trackFEATS->get_feature_database()->get_feature(landmark.second->_featid);
All the features that are needed are then collected into the feats_slam variable.
Later, the update step also distinguishes DELAYED from UPDATE (for SLAM features), so we need a more precise understanding of the difference between the MSCKF and SLAM updates.
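Roughly speaking, SLAM features are inserted into the state once (delayed initialization) and are then corrected by regular EKF updates, while MSCKF features never enter the state: they are triangulated, the landmark is projected out of the measurement model, and only the clone poses are corrected. A minimal Eigen sketch of that nullspace projection (illustrative only; OpenVINS does this inside its updater helper code, and the toy dimensions below are arbitrary):
// Residual model: r = H_x * dx + H_f * df + n.  The MSCKF update removes the landmark
// error df by multiplying from the left with a basis N of the left nullspace of H_f
// (N^T * H_f = 0), so the landmark never has to be kept in the state.
#include <Eigen/Dense>
#include <iostream>

int main() {
  const int rows = 8, state_dim = 9;                               // toy sizes: 4 observations, 3 clones
  Eigen::MatrixXd H_x = Eigen::MatrixXd::Random(rows, state_dim);  // Jacobian w.r.t. clone states
  Eigen::MatrixXd H_f = Eigen::MatrixXd::Random(rows, 3);          // Jacobian w.r.t. the 3D feature
  Eigen::VectorXd r = Eigen::VectorXd::Random(rows);               // stacked reprojection residual

  // Left nullspace of H_f via a full QR decomposition: the last (rows - 3) columns of Q
  // are orthogonal to the column space of H_f.
  Eigen::HouseholderQR<Eigen::MatrixXd> qr(H_f);
  Eigen::MatrixXd Q = qr.householderQ();
  Eigen::MatrixXd N = Q.rightCols(rows - 3);

  Eigen::MatrixXd H_x_proj = N.transpose() * H_x;  // landmark-free Jacobian used for the EKF update
  Eigen::VectorXd r_proj = N.transpose() * r;      // landmark-free residual
  std::cout << "||N^T H_f|| = " << (N.transpose() * H_f).norm() << std::endl;  // ~0 up to numerics
  return 0;
}
The SLAM update, by contrast, keeps the landmark in the state and estimates it like any other variable, which is why it needs the separate DELAYED (first insertion) and UPDATE (subsequent corrections) paths.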
// If we do not have VIO initialization, then try to initialize
// TODO: Or if we are trying to reset the system, then do that here!
if (!is_initialized_vio) {
  is_initialized_vio = try_to_initialize(message);
  if (!is_initialized_vio) {
    double time_track = (rT2 - rT1).total_microseconds() * 1e-6;
    PRINT_DEBUG(BLUE "[TIME]: %.4f seconds for tracking\n" RESET, time_track);
    return;
  }
}
How does this logic end up producing that print? → It shows up first on startup, and the same print statement was also found in the propagation part.
estimator_config.yaml
# our front-end feature tracking parameters
# we have a KLT and descriptor based (KLT is better implemented...)
use_klt: true # if true we will use KLT, otherwise use a ORB descriptor + robust matching
num_pts: 200 # number of points (per camera) we will extract and try to track
fast_threshold: 20 # threshold for fast extraction (warning: lower threshs can be expensive)
grid_x: 5 # extraction sub-grid count for horizontal direction (uniform tracking)
grid_y: 5 # extraction sub-grid count for vertical direction (uniform tracking)
min_px_dist: 10 # distance between features (features near each other provide less information)
knn_ratio: 0.70 # descriptor knn threshold for the top two descriptor matches
track_frequency: 21.0 # frequency we will perform feature tracking at (in frames per second / hertz)
downsample_cameras: false # will downsample image in half if true
num_opencv_threads: 4 # -1: auto, 0-1: serial, >1: number of threads
histogram_method: "HISTOGRAM" # NONE, HISTOGRAM, CLAHE
ROS1Visualizer::callback_monocular
void ROS1Visualizer::callback_monocular(const sensor_msgs::ImageConstPtr &msg0, int cam_id0)

// Create the measurement
ov_core::CameraData message;
message.timestamp = cv_ptr->header.stamp.toSec();
message.sensor_ids.push_back(cam_id0);
message.images.push_back(cv_ptr->image.clone());

// Load the mask if we are using it, else it is empty
// TODO: in the future we should get this from external pixel segmentation
if (_app->get_params().use_mask) {
  message.masks.push_back(_app->get_params().masks.at(cam_id0));
} else {
  message.masks.push_back(cv::Mat::zeros(cv_ptr->image.rows, cv_ptr->image.cols, CV_8UC1));
}

// append it to our queue of images
std::lock_guard<std::mutex> lck(camera_queue_mtx);
camera_queue.push_back(message);
std::sort(camera_queue.begin(), camera_queue.end());
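Regarding the sorting question above: ov_core::CameraData defines operator< (by timestamp, as far as I can tell), so std::sort keeps the queue in chronological order even when several cameras push messages slightly out of order. A self-contained sketch of the same idea, using a stand-in struct rather than the real CameraData:
#include <algorithm>
#include <iostream>
#include <vector>

struct FakeCameraData {  // stand-in for ov_core::CameraData, just for illustration
  double timestamp;
  int sensor_id;
  bool operator<(const FakeCameraData &other) const { return timestamp < other.timestamp; }
};

int main() {
  std::vector<FakeCameraData> queue = {{10.2, 1}, {10.1, 0}, {10.3, 0}};
  std::sort(queue.begin(), queue.end());  // oldest message ends up at queue.front()
  for (const auto &m : queue)
    std::cout << m.timestamp << " (cam " << m.sensor_id << ")\n";  // 10.1, 10.2, 10.3
  return 0;
}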
Optical Flow
void TrackKLT::feed_new_camera()
void TrackKLT::feed_new_camera(const CameraData &message) {
  // Error check that we have all the data
  if (message.sensor_ids.empty() || message.sensor_ids.size() != message.images.size() || message.images.size() != message.masks.size()) {
    std::exit(EXIT_FAILURE);
  }

  // Preprocessing steps that we do not parallelize
  // NOTE: DO NOT PARALLELIZE THESE!
  // NOTE: These seem to be much slower if you parallelize them...
  rT1 = boost::posix_time::microsec_clock::local_time();
  size_t num_images = message.images.size();
  for (size_t msg_id = 0; msg_id < num_images; msg_id++) {
    // Lock this data feed for this camera
    size_t cam_id = message.sensor_ids.at(msg_id);
    std::lock_guard<std::mutex> lck(mtx_feeds.at(cam_id));

    // Histogram equalize
    cv::Mat img;
    if (histogram_method == HistogramMethod::HISTOGRAM) {
      cv::equalizeHist(message.images.at(msg_id), img);
    } else if (histogram_method == HistogramMethod::CLAHE) {
      double eq_clip_limit = 10.0;
      cv::Size eq_win_size = cv::Size(8, 8);
      cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE(eq_clip_limit, eq_win_size);
      clahe->apply(message.images.at(msg_id), img);
    } else {
      img = message.images.at(msg_id);
    }

    // Extract image pyramid
    std::vector<cv::Mat> imgpyr;
    cv::buildOpticalFlowPyramid(img, imgpyr, win_size, pyr_levels);

    // Save!
    img_curr[cam_id] = img;
    img_pyramid_curr[cam_id] = imgpyr;
  }

  // Either call our stereo or monocular version
  // If we are doing binocular tracking, then we should parallize our tracking
  if (num_images == 1) {
    feed_monocular(message, 0);
  } else if (num_images == 2 && use_stereo) {
    feed_stereo(message, 0, 1);
  } else if (!use_stereo) {
    parallel_for_(cv::Range(0, (int)num_images), LambdaBody([&](const cv::Range &range) {
                    for (int i = range.start; i < range.end; i++) {
                      feed_monocular(message, i);
                    }
                  }));
  } else {
    std::exit(EXIT_FAILURE);
  }
}
Histogram
buildOpticalFlowPyramid
feed_monocular or feed_stereo
TrackKLT::feed_monocular()
For extraction, imgpyr is first obtained through cv::buildOpticalFlowPyramid; detection is then done by perform_detection_monocular. If std::unordered_map<size_t, std::vector<cv::KeyPoint>> pts_last; is empty, the required data is filled in first.
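As a rough idea of what perform_detection_monocular does (an illustrative sketch only, assuming a simplified version of OpenVINS's grid-based extraction; the real code uses its Grider_FAST helper, works on the pyramid, and assigns feature ids): existing tracks occupy a coarse grid with cell size min_px_dist, FAST keypoints are extracted, and only keypoints landing in a free cell are accepted, which both tops up the track count and enforces the minimum pixel distance.
#include <opencv2/opencv.hpp>
#include <vector>

void detect_new_features(const cv::Mat &img, const std::vector<cv::KeyPoint> &existing,
                         int min_px_dist, int fast_threshold, std::vector<cv::KeyPoint> &new_pts) {
  // Occupancy grid: one cell per min_px_dist x min_px_dist pixel block
  int grid_w = img.cols / min_px_dist + 1, grid_h = img.rows / min_px_dist + 1;
  cv::Mat occupied = cv::Mat::zeros(grid_h, grid_w, CV_8UC1);
  for (const auto &kp : existing)
    occupied.at<uint8_t>((int)kp.pt.y / min_px_dist, (int)kp.pt.x / min_px_dist) = 1;

  // Extract FAST corners and keep only those that land in a free cell
  std::vector<cv::KeyPoint> candidates;
  cv::FAST(img, candidates, fast_threshold, true);  // nonmax suppression enabled
  for (const auto &kp : candidates) {
    int gx = (int)kp.pt.x / min_px_dist, gy = (int)kp.pt.y / min_px_dist;
    if (occupied.at<uint8_t>(gy, gx))
      continue;  // too close to an existing or already-accepted feature
    occupied.at<uint8_t>(gy, gx) = 1;
    new_pts.push_back(kp);
  }
}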
void TrackKLT::feed_monocular(const CameraData &message, size_t msg_id) {
  // Lock this data feed for this camera
  size_t cam_id = message.sensor_ids.at(msg_id);
  std::lock_guard<std::mutex> lck(mtx_feeds.at(cam_id));

  // Get our image objects for this image
  cv::Mat img = img_curr.at(cam_id);
  std::vector<cv::Mat> imgpyr = img_pyramid_curr.at(cam_id);
  cv::Mat mask = message.masks.at(msg_id);
  rT2 = boost::posix_time::microsec_clock::local_time();

  // If we didn't have any successful tracks last time, just extract this time
  // This also handles, the tracking initalization on the first call to this extractor
  if (pts_last[cam_id].empty()) {
    // Detect new features
    std::vector<cv::KeyPoint> good_left;
    std::vector<size_t> good_ids_left;
    perform_detection_monocular(imgpyr, mask, good_left, good_ids_left);

    // Save the current image and pyramid
    std::lock_guard<std::mutex> lckv(mtx_last_vars);
    img_last[cam_id] = img;
    img_pyramid_last[cam_id] = imgpyr;
    img_mask_last[cam_id] = mask;
    pts_last[cam_id] = good_left;
    ids_last[cam_id] = good_ids_left;
    return;
  }
The real work happens from here on. In other words, except for the very first call, perform_detection_monocular is normally run on the previous frame's data.
// First we should make that the last images have enough features so we can do KLT
// This will "top-off" our number of tracks so always have a constant number
int pts_before_detect = (int)pts_last[cam_id].size();
auto pts_left_old = pts_last[cam_id];
auto ids_left_old = ids_last[cam_id];
perform_detection_monocular(img_pyramid_last[cam_id], img_mask_last[cam_id], pts_left_old, ids_left_old);
With these, perform_matching is run to obtain mask_ll and pts_left_new.
// Our return success masks, and predicted new features
std::vector<uchar> mask_ll;
std::vector<cv::KeyPoint> pts_left_new = pts_left_old;

// Lets track temporally
perform_matching(img_pyramid_last[cam_id], imgpyr, pts_left_old, pts_left_new, cam_id, cam_id, mask_ll);
assert(pts_left_new.size() == ids_left_old.size());

// If any of our mask is empty, that means we didn't have enough to do ransac, so just return
if (mask_ll.empty()) {
  std::lock_guard<std::mutex> lckv(mtx_last_vars);
  img_last[cam_id] = img;
  img_pyramid_last[cam_id] = imgpyr;
  img_mask_last[cam_id] = mask;
  pts_last[cam_id].clear();
  ids_last[cam_id].clear();
  PRINT_ERROR(RED "[KLT-EXTRACTOR]: Failed to get enough points to do RANSAC, resetting.....\n" RESET);
  return;
}
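For reference, a rough sketch of what this temporal matching amounts to (illustrative only; the real TrackKLT::perform_matching also undistorts the points and scales the RANSAC threshold by the focal length): pyramidal Lucas-Kanade predicts where each old point moved, a RANSAC model rejects geometric outliers, and the combined result is what comes back as mask_ll.
#include <opencv2/opencv.hpp>
#include <vector>

void match_temporally(const std::vector<cv::Mat> &pyr_last, const std::vector<cv::Mat> &pyr_curr,
                      const std::vector<cv::KeyPoint> &kpts_last, std::vector<cv::KeyPoint> &kpts_curr,
                      std::vector<uchar> &mask_out) {
  std::vector<cv::Point2f> pts_last, pts_curr;
  cv::KeyPoint::convert(kpts_last, pts_last);
  cv::KeyPoint::convert(kpts_curr, pts_curr);  // used as the initial guess for LK
  if (pts_last.size() < 10) {                  // not enough points for a reliable RANSAC model
    mask_out.clear();
    return;
  }

  // 1. Pyramidal Lucas-Kanade optical flow (OPTFLOW_USE_INITIAL_FLOW keeps the guess)
  std::vector<uchar> mask_klt;
  std::vector<float> error;
  cv::TermCriteria term(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 30, 0.01);
  cv::calcOpticalFlowPyrLK(pyr_last, pyr_curr, pts_last, pts_curr, mask_klt, error,
                           cv::Size(15, 15), 3, term, cv::OPTFLOW_USE_INITIAL_FLOW);

  // 2. Geometric outlier rejection with RANSAC
  std::vector<uchar> mask_rsc;
  cv::findFundamentalMat(pts_last, pts_curr, cv::FM_RANSAC, 1.0, 0.999, mask_rsc);

  // 3. A track is good only if it passed both checks
  mask_out.resize(pts_last.size());
  for (size_t i = 0; i < pts_last.size(); i++)
    mask_out[i] = (mask_klt[i] && mask_rsc[i]);
  cv::KeyPoint::convert(pts_curr, kpts_curr);  // write the tracked locations back
}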
Once matching is done, only good_left and good_ids_left are kept and used to update the database.
// Get our "good tracks"
std::vector<cv::KeyPoint> good_left;
std::vector<size_t> good_ids_left;

// Loop through all left points
for (size_t i = 0; i < pts_left_new.size(); i++) {
  // Ensure we do not have any bad KLT tracks (i.e., points are negative)
  if (pts_left_new.at(i).pt.x < 0 || pts_left_new.at(i).pt.y < 0 || (int)pts_left_new.at(i).pt.x >= img.cols ||
      (int)pts_left_new.at(i).pt.y >= img.rows)
    continue;
  // Check if it is in the mask
  // NOTE: mask has max value of 255 (white) if it should be
  if ((int)message.masks.at(msg_id).at<uint8_t>((int)pts_left_new.at(i).pt.y, (int)pts_left_new.at(i).pt.x) > 127)
    continue;
  // If it is a good track, and also tracked from left to right
  if (mask_ll[i]) {
    good_left.push_back(pts_left_new[i]);
    good_ids_left.push_back(ids_left_old[i]);
  }
}

// Update our feature database, with theses new observations
for (size_t i = 0; i < good_left.size(); i++) {
  cv::Point2f npt_l = camera_calib.at(cam_id)->undistort_cv(good_left.at(i).pt);
  database->update_feature(good_ids_left.at(i), message.timestamp, cam_id, good_left.at(i).pt.x, good_left.at(i).pt.y, npt_l.x, npt_l.y);
}

// Move forward in time
{
  std::lock_guard<std::mutex> lckv(mtx_last_vars);
  img_last[cam_id] = img;
  img_pyramid_last[cam_id] = imgpyr;
  img_mask_last[cam_id] = mask;
  pts_last[cam_id] = good_left;
  ids_last[cam_id] = good_ids_left;
}
}
With that, the feature-tracking side is done, and control returns to VioManager::track_image_and_update(), which then propagates. It is still worth checking what feed_stereo does in addition.
Descriptor
void TrackDescriptor::feed_new_camera() is essentially the same, so it is skipped.
void TrackDescriptor::feed_monocular()
There is a slight difference in logic: the optical-flow version is the one that needs cv::buildOpticalFlowPyramid(img, imgpyr, win_size, pyr_levels);.
void TrackDescriptor::feed_monocular(const CameraData &message, size_t msg_id) {
  // Lock this data feed for this camera
  size_t cam_id = message.sensor_ids.at(msg_id);
  std::lock_guard<std::mutex> lck(mtx_feeds.at(cam_id));

  // Histogram equalize
  cv::Mat img, mask;
  if (histogram_method == HistogramMethod::HISTOGRAM) {
    cv::equalizeHist(message.images.at(msg_id), img);
  } else if (histogram_method == HistogramMethod::CLAHE) {
    double eq_clip_limit = 10.0;
    cv::Size eq_win_size = cv::Size(8, 8);
    cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE(eq_clip_limit, eq_win_size);
    clahe->apply(message.images.at(msg_id), img);
  } else {
    img = message.images.at(msg_id);
  }
  mask = message.masks.at(msg_id);

  // If we are the first frame (or have lost tracking), initialize our descriptors
  if (pts_last.find(cam_id) == pts_last.end() || pts_last[cam_id].empty()) {
    std::vector<cv::KeyPoint> good_left;
    std::vector<size_t> good_ids_left;
    cv::Mat good_desc_left;
    perform_detection_monocular(img, mask, good_left, good_desc_left, good_ids_left);
    std::lock_guard<std::mutex> lckv(mtx_last_vars);
    img_last[cam_id] = img;
    img_mask_last[cam_id] = mask;
    pts_last[cam_id] = good_left;
    ids_last[cam_id] = good_ids_left;
    desc_last[cam_id] = good_desc_left;
    return;
  }
The variable names can be slightly confusing. The KLT tracker passes the "old" variables into perform_detection_monocular(img_pyramid_last[cam_id], img_mask_last[cam_id], pts_left_old, ids_left_old); and updates them in place, whereas the descriptor version receives fresh outputs as shown below, with desc_new added.
// Our new keypoints and descriptor for the new image
std::vector<cv::KeyPoint> pts_new;
cv::Mat desc_new;
std::vector<size_t> ids_new;

// First, extract new descriptors for this new image
perform_detection_monocular(img, mask, pts_new, desc_new, ids_new);
The rest of the flow is the same. It would be worth comparing what robust_match does against perform_matching (a rough sketch of robust_match is given after the code below). Either way, it finishes by updating the database.
// Our matches temporally
std::vector<cv::DMatch> matches_ll;

// Lets match temporally
robust_match(pts_last[cam_id], pts_new, desc_last[cam_id], desc_new, cam_id, cam_id, matches_ll);

// Get our "good tracks"
std::vector<cv::KeyPoint> good_left;
std::vector<size_t> good_ids_left;
cv::Mat good_desc_left;

// Count how many we have tracked from the last time
int num_tracklast = 0;

// Loop through all current left to right points
// We want to see if any of theses have matches to the previous frame
// If we have a match new->old then we want to use that ID instead of the new one
for (size_t i = 0; i < pts_new.size(); i++) {
  // Loop through all left matches, and find the old "train" id
  int idll = -1;
  for (size_t j = 0; j < matches_ll.size(); j++) {
    if (matches_ll[j].trainIdx == (int)i) {
      idll = matches_ll[j].queryIdx;
    }
  }
  // Then lets replace the current ID with the old ID if found
  // Else just append the current feature and its unique ID
  good_left.push_back(pts_new[i]);
  good_desc_left.push_back(desc_new.row((int)i));
  if (idll != -1) {
    good_ids_left.push_back(ids_last[cam_id][idll]);
    num_tracklast++;
  } else {
    good_ids_left.push_back(ids_new[i]);
  }
}

// Update our feature database, with theses new observations
for (size_t i = 0; i < good_left.size(); i++) {
  cv::Point2f npt_l = camera_calib.at(cam_id)->undistort_cv(good_left.at(i).pt);
  database->update_feature(good_ids_left.at(i), message.timestamp, cam_id, good_left.at(i).pt.x, good_left.at(i).pt.y, npt_l.x, npt_l.y);
}

// Move forward in time
{
  std::lock_guard<std::mutex> lckv(mtx_last_vars);
  img_last[cam_id] = img;
  img_mask_last[cam_id] = mask;
  pts_last[cam_id] = good_left;
  ids_last[cam_id] = good_ids_left;
  desc_last[cam_id] = good_desc_left;
}
}
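To close the comparison with perform_matching: robust_match works on descriptors instead of optical flow. A minimal sketch of the core idea (illustrative only; the real robust_match additionally does a symmetric left/right check and a RANSAC test on the matched points), with the knn_ratio argument playing the role of the knn_ratio value from the config file:
#include <opencv2/opencv.hpp>
#include <vector>

void ratio_test_match(const cv::Mat &desc_last, const cv::Mat &desc_new, double knn_ratio,
                      std::vector<cv::DMatch> &good_matches) {
  cv::BFMatcher matcher(cv::NORM_HAMMING);  // Hamming distance for binary (ORB-style) descriptors
  std::vector<std::vector<cv::DMatch>> knn_matches;
  matcher.knnMatch(desc_last, desc_new, knn_matches, 2);  // two best candidates per descriptor
  for (const auto &m : knn_matches) {
    // Keep a match only if the best candidate is clearly better than the second best
    if (m.size() == 2 && m[0].distance < knn_ratio * m[1].distance)
      good_matches.push_back(m[0]);
  }
}
Here the query side is the previous frame and the train side is the new frame, which matches the queryIdx / trainIdx usage in the loop above.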