Implement REPROJ_AVG_DEPTH

Mateo de Mayo 2022-07-20 18:53:55 +00:00
parent 41b04f6292
commit 6f3f61c91f
38 changed files with 429 additions and 145 deletions
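This commit replaces the static per-calibration view_offset with a per-feature matching guess: before running stereo KLT, each cam0 keypoint is reprojected into cam1 assuming it lies at a guessed depth, and the resulting pixel offset seeds the search. The calib.viewOffset(t1, depth_guess) helper used throughout the diff lives in the basalt-headers subproject (bumped at the bottom of this commit), so its body does not appear here. A minimal sketch of the computation it performs, with a hypothetical pinhole model and the name view_offset_guess standing in for Basalt's generic camera models:

```cpp
#include <Eigen/Dense>
#include <sophus/se3.hpp>

// Hypothetical pinhole stand-in for Basalt's intrinsics classes.
struct Pinhole {
  double fx, fy, cx, cy;
  Eigen::Vector3d unproject(const Eigen::Vector2d& p) const {
    return {(p.x() - cx) / fx, (p.y() - cy) / fy, 1.0};
  }
  Eigen::Vector2d project(const Eigen::Vector3d& p) const {
    return {fx * p.x() / p.z() + cx, fy * p.y() / p.z() + cy};
  }
};

// Approximates what calib.viewOffset(p2d_c0, depth) computes: assume the
// pixel sees a point at `depth` meters, express that point in cam1, and
// return the offset to subtract from the cam0 pixel to land on cam1's view.
Eigen::Vector2d view_offset_guess(const Pinhole& cam0, const Pinhole& cam1,
                                  const Sophus::SE3d& T_c1_c0,
                                  const Eigen::Vector2d& p2d_c0, double depth) {
  Eigen::Vector3d p3d_c0 = cam0.unproject(p2d_c0) * depth;  // point in cam0
  Eigen::Vector3d p3d_c1 = T_c1_c0 * p3d_c0;                // same point in cam1
  return p2d_c0 - cam1.project(p3d_c1);
}
```

Of the three new guess types, SAME_PIXEL keeps the old behavior (zero offset), REPROJ_FIX_DEPTH uses optical_flow_matching_default_depth every frame, and REPROJ_AVG_DEPTH refreshes depth_guess each frame with the average landmark depth pushed back by the estimator.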

View File

@@ -97,7 +97,6 @@
 0.0007071067802939111
 ],
 "cam_time_offset_ns": 0,
-"view_offset": [0, 0],
 "vignette": []
 }
}

View File

@@ -97,7 +97,6 @@
 0.0007071067802939111
 ],
 "cam_time_offset_ns": 0,
-"view_offset": [0, 0],
 "vignette": []
 }
}

View File

@@ -99,7 +99,6 @@
 0.0007071067802939111
 ],
 "cam_time_offset_ns": 0,
-"view_offset": [0, 0],
 "vignette": []
 }
}

View File

@@ -8,6 +8,8 @@
 "config.optical_flow_epipolar_error": 0.005,
 "config.optical_flow_levels": 3,
 "config.optical_flow_skip_frames": 1,
+"config.optical_flow_matching_guess_type": "REPROJ_AVG_DEPTH",
+"config.optical_flow_matching_default_depth": 2.0,
 "config.vio_linearization_type": "ABS_QR",
 "config.vio_sqrt_marg": true,
 "config.vio_max_states": 3,

View File

@@ -8,6 +8,8 @@
 "config.optical_flow_epipolar_error": 0.005,
 "config.optical_flow_levels": 3,
 "config.optical_flow_skip_frames": 1,
+"config.optical_flow_matching_guess_type": "SAME_PIXEL",
+"config.optical_flow_matching_default_depth": 2.0,
 "config.vio_linearization_type": "ABS_QR",
 "config.vio_sqrt_marg": true,
 "config.vio_max_states": 3,

View File

@@ -8,6 +8,8 @@
 "config.optical_flow_epipolar_error": 0.005,
 "config.optical_flow_levels": 3,
 "config.optical_flow_skip_frames": 1,
+"config.optical_flow_matching_guess_type": "SAME_PIXEL",
+"config.optical_flow_matching_default_depth": 2.0,
 "config.vio_linearization_type": "ABS_QR",
 "config.vio_sqrt_marg": true,
 "config.vio_max_states": 3,

View File

@@ -8,6 +8,8 @@
 "config.optical_flow_epipolar_error": 0.005,
 "config.optical_flow_levels": 3,
 "config.optical_flow_skip_frames": 1,
+"config.optical_flow_matching_guess_type": "SAME_PIXEL",
+"config.optical_flow_matching_default_depth": 2.0,
 "config.vio_linearization_type": "ABS_QR",
 "config.vio_sqrt_marg": true,
 "config.vio_max_states": 3,

View File

@@ -224,7 +224,6 @@
 },
 "mocap_time_offset_ns": 0,
 "mocap_to_imu_offset_ns": 140763258159875,
-"cam_time_offset_ns": 0,
-"view_offset": [0, 0]
+"cam_time_offset_ns": 0
 }
}

View File

@@ -208,7 +208,6 @@
 },
 "mocap_time_offset_ns": 0,
 "mocap_to_imu_offset_ns": 140763258159875,
-"cam_time_offset_ns": 0,
-"view_offset": [0, 0]
+"cam_time_offset_ns": 0
 }
}

View File

@@ -238,7 +238,6 @@
 },
 "mocap_time_offset_ns": 0,
 "mocap_to_imu_offset_ns": 140763258159875,
-"cam_time_offset_ns": 0,
-"view_offset": [0, 0]
+"cam_time_offset_ns": 0
 }
}

View File

@@ -15,6 +15,8 @@
 "config.optical_flow_epipolar_error" = 0.005
 "config.optical_flow_levels" = 3
 "config.optical_flow_skip_frames" = 1
+"config.optical_flow_matching_guess_type" = "SAME_PIXEL"
+"config.optical_flow_matching_default_depth" = 2.0
 "config.vio_linearization_type" = "ABS_QR"
 "config.vio_sqrt_marg" = true

View File

@@ -8,6 +8,8 @@
 "config.optical_flow_epipolar_error": 0.001,
 "config.optical_flow_levels": 4,
 "config.optical_flow_skip_frames": 1,
+"config.optical_flow_matching_guess_type": "SAME_PIXEL",
+"config.optical_flow_matching_default_depth": 2.0,
 "config.vio_linearization_type": "ABS_QR",
 "config.vio_sqrt_marg": true,
 "config.vio_max_states": 3,

View File

@@ -246,7 +246,7 @@ def main():
 "accel_bias_std": bias_std(j, "Accelerometer"),
 "gyro_bias_std": bias_std(j, "Gyro"),
 "cam_time_offset_ns": 0,
-"view_offset": view_offset(j),
+# "view_offset": view_offset(j),
 "vignette": [],
 }
}

View File

@@ -108,10 +108,6 @@
 0.009999999873689375
 ],
 "cam_time_offset_ns": 0,
-"view_offset": [
-  248.7971689190779,
-  -5.785840906463989
-],
 "vignette": []
 }
}

View File

@@ -115,10 +115,6 @@
 0.009999999873689375
 ],
 "cam_time_offset_ns": 0,
-"view_offset": [
-  248.7971689190779,
-  -5.785840906463989
-],
 "vignette": []
 }
}

View File

@@ -115,7 +115,6 @@
 0.009999999873689375
 ],
 "cam_time_offset_ns": 0,
-"view_offset": [0, 0],
 "vignette": []
 }
}

View File

@@ -8,6 +8,8 @@
 "config.optical_flow_epipolar_error": 0.005,
 "config.optical_flow_levels": 3,
 "config.optical_flow_skip_frames": 1,
+"config.optical_flow_matching_guess_type": "REPROJ_AVG_DEPTH",
+"config.optical_flow_matching_default_depth": 2.0,
 "config.vio_linearization_type": "ABS_QR",
 "config.vio_sqrt_marg": true,
 "config.vio_max_states": 3,

View File

@@ -105,7 +105,6 @@
 "mocap_time_offset_ns": 0,
 "mocap_to_imu_offset_ns": 0,
 "cam_time_offset_ns": 0,
-"view_offset": [0, 0],
 "vignette": [
 {
 "value0": 0,

View File

@@ -105,7 +105,6 @@
 "mocap_time_offset_ns": 0,
 "mocap_to_imu_offset_ns": 0,
 "cam_time_offset_ns": 0,
-"view_offset": [0, 0],
 "vignette": [
 {
 "value0": 0,

View File

@@ -97,7 +97,6 @@
 0.0001
 ],
 "cam_time_offset_ns": 0,
-"view_offset": [0, 0],
 "vignette": []
 }
}

View File

@@ -79,6 +79,7 @@ class FrameToFrameOpticalFlow : public OpticalFlowBase {
 this->calib = calib.cast<Scalar>();
 patch_coord = PatchT::pattern2.template cast<float>();
+depth_guess = config.optical_flow_matching_default_depth;
 if (calib.intrinsics.size() > 1) {
 Eigen::Matrix4d Ed;

@@ -97,6 +98,8 @@ class FrameToFrameOpticalFlow : public OpticalFlowBase {
 OpticalFlowInput::Ptr input_ptr;
 while (true) {
+while (input_depth_queue.try_pop(depth_guess)) continue;
 input_queue.pop(input_ptr);
 if (!input_ptr.get()) {

@@ -186,7 +189,7 @@ class FrameToFrameOpticalFlow : public OpticalFlowBase {
 const Eigen::aligned_map<KeypointId, Eigen::AffineCompact2f>&
     transform_map_1,
 Eigen::aligned_map<KeypointId, Eigen::AffineCompact2f>& transform_map_2,
-Vector2 view_offset = Vector2::Zero()) const {
+bool matching = false) const {
 size_t num_points = transform_map_1.size();
 std::vector<KeypointId> ids;

@@ -210,26 +213,35 @@ class FrameToFrameOpticalFlow : public OpticalFlowBase {
 const Eigen::AffineCompact2f& transform_1 = init_vec[r];
 Eigen::AffineCompact2f transform_2 = transform_1;
-transform_2.translation() -= view_offset;
-bool valid = transform_2.translation()(0) >= 0 &&
-             transform_2.translation()(1) >= 0 &&
-             transform_2.translation()(0) < pyr_2.lvl(0).w &&
-             transform_2.translation()(1) < pyr_2.lvl(0).h;
+auto t1 = transform_1.translation();
+auto t2 = transform_2.translation();
+
+Eigen::Vector2f off{0, 0};
+MatchingGuessType guess_type = config.optical_flow_matching_guess_type;
+if (matching && guess_type != MatchingGuessType::SAME_PIXEL) {
+  off = calib.viewOffset(t1, depth_guess).template cast<float>();
+  transforms->input_images->depth_guess = depth_guess;  // For UI
+}
+
+t2 -= off;  // This modifies transform_2
+
+bool valid = t2(0) >= 0 && t2(1) >= 0 && t2(0) < pyr_2.lvl(0).w &&
+             t2(1) < pyr_2.lvl(0).h;
 if (!valid) continue;
 valid = trackPoint(pyr_1, pyr_2, transform_1, transform_2);
 if (!valid) continue;
 Eigen::AffineCompact2f transform_1_recovered = transform_2;
-transform_1_recovered.translation() += view_offset;
+auto t1_recovered = transform_1_recovered.translation();
+t1_recovered += off;
 valid = trackPoint(pyr_2, pyr_1, transform_2, transform_1_recovered);
 if (!valid) continue;
-Scalar dist2 =
-    (transform_1.translation() - transform_1_recovered.translation())
-        .squaredNorm();
+Scalar dist2 = (t1 - t1_recovered).squaredNorm();
 if (dist2 < config.optical_flow_max_recovered_dist2) {
 result[id] = transform_2;

@@ -341,8 +353,7 @@ class FrameToFrameOpticalFlow : public OpticalFlowBase {
 }
 if (calib.intrinsics.size() > 1) {
-trackPoints(pyramid->at(0), pyramid->at(1), new_poses0, new_poses1,
-            calib.view_offset);
+trackPoints(pyramid->at(0), pyramid->at(1), new_poses0, new_poses1, true);
 for (const auto& kv : new_poses1) {
 transforms->observations.at(1).emplace(kv);
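The `while (input_depth_queue.try_pop(depth_guess)) continue;` line added above is a drain-to-latest idiom: the backend may have pushed several depth estimates since the last image arrived, and only the newest one matters. A standalone sketch of the pattern (the queue and member names match the diff; the driver function is hypothetical):

```cpp
#include <tbb/concurrent_queue.h>

tbb::concurrent_queue<double> input_depth_queue;  // filled by the estimator
double depth_guess = 2.0;  // optical_flow_matching_default_depth

void on_new_frame() {
  // Each successful try_pop overwrites depth_guess, so after the loop it
  // holds the most recently pushed estimate (or is unchanged if none came).
  while (input_depth_queue.try_pop(depth_guess)) continue;
  // ... detect and track features using depth_guess ...
}
```

Since the queue is unbounded and try_pop never blocks, the tracker stays decoupled from the backend's pace; this is also why the queues_printer change further down reports it as "unsafe_size()/--".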

View File

@@ -81,6 +81,7 @@ class MultiscaleFrameToFrameOpticalFlow : public OpticalFlowBase {
 this->calib = calib.cast<Scalar>();
 patch_coord = PatchT::pattern2.template cast<float>();
+depth_guess = config.optical_flow_matching_default_depth;
 if (calib.intrinsics.size() > 1) {
 Eigen::Matrix4d Ed;

@@ -99,12 +100,15 @@ class MultiscaleFrameToFrameOpticalFlow : public OpticalFlowBase {
 OpticalFlowInput::Ptr input_ptr;
 while (true) {
+while (input_depth_queue.try_pop(depth_guess)) continue;
 input_queue.pop(input_ptr);
 if (!input_ptr.get()) {
 if (output_queue) output_queue->push(nullptr);
 break;
 }
+input_ptr->addTime("frames_received");
 processFrame(input_ptr->t_ns, input_ptr);
 }

@@ -182,6 +186,7 @@ class MultiscaleFrameToFrameOpticalFlow : public OpticalFlowBase {
 }
 if (frame_counter % config.optical_flow_skip_frames == 0) {
+transforms->input_images->addTime("opticalflow_produced");
 try {
 output_queue->push(transforms);
 } catch (const tbb::user_abort&) {

@@ -201,7 +206,7 @@ class MultiscaleFrameToFrameOpticalFlow : public OpticalFlowBase {
 const std::map<KeypointId, size_t>& pyramid_levels_1,
 Eigen::aligned_map<KeypointId, Eigen::AffineCompact2f>& transform_map_2,
 std::map<KeypointId, size_t>& pyramid_levels_2,
-Vector2 view_offset = Vector2::Zero()) const {
+bool matching = false) const {
 size_t num_points = transform_map_1.size();
 std::vector<KeypointId> ids;

@@ -230,34 +235,42 @@ class MultiscaleFrameToFrameOpticalFlow : public OpticalFlowBase {
 const Eigen::AffineCompact2f& transform_1 = init_vec[r];
 Eigen::AffineCompact2f transform_2 = transform_1;
-transform_2.translation() -= view_offset;
-bool valid =
-    transform_2.translation()(0) >= 0 &&
-    transform_2.translation()(1) >= 0 &&
-    transform_2.translation()(0) < pyr_2.lvl(pyramid_level[r]).w &&
-    transform_2.translation()(1) < pyr_2.lvl(pyramid_level[r]).h;
+auto t1 = transform_1.translation();
+auto t2 = transform_2.translation();
+
+Eigen::Vector2f off{0, 0};
+MatchingGuessType guess_type = config.optical_flow_matching_guess_type;
+if (matching && guess_type != MatchingGuessType::SAME_PIXEL) {
+  off = calib.viewOffset(t1, depth_guess).template cast<float>();
+  transforms->input_images->depth_guess = depth_guess;  // For UI
+}
+
+t2 -= off;  // This modifies transform_2
+
+bool valid = t2(0) >= 0 && t2(1) >= 0 && t2(0) < pyr_2.lvl(0).w &&
+             t2(1) < pyr_2.lvl(0).h;
 if (!valid) continue;
 valid = trackPoint(pyr_1, pyr_2, transform_1, pyramid_level[r],
                    transform_2);
-if (valid) {
-  Eigen::AffineCompact2f transform_1_recovered = transform_2;
-  transform_1_recovered.translation() += view_offset;
-  valid = trackPoint(pyr_2, pyr_1, transform_2, pyramid_level[r],
-                     transform_1_recovered);
-  if (valid) {
-    const Scalar scale = 1 << pyramid_level[r];
-    Scalar dist2 = (transform_1.translation() / scale -
-                    transform_1_recovered.translation() / scale)
-                       .squaredNorm();
-    if (dist2 < config.optical_flow_max_recovered_dist2) {
-      result_transforms[id] = transform_2;
-      result_pyramid_level[id] = pyramid_level[r];
-    }
-  }
+if (!valid) continue;
+
+Eigen::AffineCompact2f transform_1_recovered = transform_2;
+auto t1_recovered = transform_1_recovered.translation();
+
+t1_recovered += off;
+
+valid = trackPoint(pyr_2, pyr_1, transform_2, pyramid_level[r],
+                   transform_1_recovered);
+if (!valid) continue;
+
+const Scalar scale = 1 << pyramid_level[r];
+Scalar dist2 = ((t1 - t1_recovered) / scale).squaredNorm();
+if (dist2 < config.optical_flow_max_recovered_dist2) {
+  result_transforms[id] = transform_2;
+  result_pyramid_level[id] = pyramid_level[r];
 }
 }
 };

@@ -400,7 +413,7 @@ class MultiscaleFrameToFrameOpticalFlow : public OpticalFlowBase {
 trackPoints(pyramid->at(0), pyramid->at(1), new_poses_main,
             new_pyramid_levels_main, new_poses_stereo,
-            new_pyramid_levels_stereo, calib.view_offset);
+            new_pyramid_levels_stereo, true);
 for (const auto& kv : new_poses_stereo) {
 transforms->observations.at(1).emplace(kv);
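One multiscale-specific detail in the hunk above: the forward-backward error is divided by the pyramid-level scale before the threshold test, so config.optical_flow_max_recovered_dist2 is interpreted in the feature's own level's pixels regardless of where it was detected. A small illustration (the 0.04 threshold is an assumed default, not taken from this diff):

```cpp
#include <Eigen/Dense>

int main() {
  // A feature from pyramid level 2 lives on an image downscaled by 1 << 2,
  // so an 8-px round-trip error at full resolution is 2 px at its level.
  int level = 2;
  float scale = 1 << level;                   // 4
  Eigen::Vector2f err{8.0f, 0.0f};            // full-resolution pixels
  float dist2 = (err / scale).squaredNorm();  // (8 / 4)^2 = 4.0
  bool ok = dist2 < 0.04f;                    // assumed default threshold
  return ok ? 0 : 1;                          // this track would be rejected
}
```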

View File

@@ -60,6 +60,8 @@ struct OpticalFlowInput {
 int64_t t_ns;
 std::vector<ImageData> img_data;
+
+double depth_guess = -1;
 timestats stats;  //!< Keeps track of internal metrics for this t_ns
 void addTime(const char* name, int64_t custom_ts = INT64_MIN) {
 stats.addTime(name, custom_ts);

@@ -83,9 +85,11 @@ class OpticalFlowBase {
 using Ptr = std::shared_ptr<OpticalFlowBase>;
 tbb::concurrent_bounded_queue<OpticalFlowInput::Ptr> input_queue;
+tbb::concurrent_queue<double> input_depth_queue;
 tbb::concurrent_bounded_queue<OpticalFlowResult::Ptr>* output_queue = nullptr;
 Eigen::MatrixXf patch_coord;
+double depth_guess = -1;
 };

 class OpticalFlowFactory {

View File

@@ -77,19 +77,20 @@ class PatchOpticalFlow : public OpticalFlowBase {
 PatchOpticalFlow(const VioConfig& config,
                  const basalt::Calibration<double>& calib)
-    : t_ns(-1),
-      frame_counter(0),
-      last_keypoint_id(0),
-      config(config),
-      calib(calib) {
+    : t_ns(-1), frame_counter(0), last_keypoint_id(0), config(config) {
 patches.reserve(3000);
 input_queue.set_capacity(10);
+this->calib = calib.cast<Scalar>();
 patch_coord = PatchT::pattern2.template cast<float>();
+depth_guess = config.optical_flow_matching_default_depth;
 if (calib.intrinsics.size() > 1) {
+Eigen::Matrix4d Ed;
 Sophus::SE3d T_i_j = calib.T_i_c[0].inverse() * calib.T_i_c[1];
-computeEssential(T_i_j, E);
+computeEssential(T_i_j, Ed);
+E = Ed.cast<Scalar>();
 }

 processing_thread.reset(

@@ -102,12 +103,15 @@ class PatchOpticalFlow : public OpticalFlowBase {
 OpticalFlowInput::Ptr input_ptr;
 while (true) {
+while (input_depth_queue.try_pop(depth_guess)) continue;
 input_queue.pop(input_ptr);
 if (!input_ptr.get()) {
 output_queue->push(nullptr);
 break;
 }
+input_ptr->addTime("frames_received");
 processFrame(input_ptr->t_ns, input_ptr);
 }

@@ -168,17 +172,19 @@ class PatchOpticalFlow : public OpticalFlowBase {
 }
 if (output_queue && frame_counter % config.optical_flow_skip_frames == 0) {
+transforms->input_images->addTime("opticalflow_produced");
 output_queue->push(transforms);
 }
 frame_counter++;
 }
-void trackPoints(const basalt::ManagedImagePyr<uint16_t>& pyr_1,
-                 const basalt::ManagedImagePyr<uint16_t>& pyr_2,
-                 const Eigen::aligned_map<KeypointId, Eigen::AffineCompact2f>&
-                     transform_map_1,
-                 Eigen::aligned_map<KeypointId, Eigen::AffineCompact2f>&
-                     transform_map_2) const {
+void trackPoints(
+    const basalt::ManagedImagePyr<uint16_t>& pyr_1,
+    const basalt::ManagedImagePyr<uint16_t>& pyr_2,
+    const Eigen::aligned_map<KeypointId, Eigen::AffineCompact2f>&
+        transform_map_1,
+    Eigen::aligned_map<KeypointId, Eigen::AffineCompact2f>& transform_map_2,
+    bool matching = false) const {
 size_t num_points = transform_map_1.size();
 std::vector<KeypointId> ids;

@@ -203,24 +209,39 @@ class PatchOpticalFlow : public OpticalFlowBase {
 const Eigen::AffineCompact2f& transform_1 = init_vec[r];
 Eigen::AffineCompact2f transform_2 = transform_1;
+auto t1 = transform_1.translation();
+auto t2 = transform_2.translation();
+
+Eigen::Vector2f off{0, 0};
+MatchingGuessType guess_type = config.optical_flow_matching_guess_type;
+if (matching && guess_type != MatchingGuessType::SAME_PIXEL) {
+  off = calib.viewOffset(t1, depth_guess).template cast<float>();
+  transforms->input_images->depth_guess = depth_guess;  // For UI
+}
+
+t2 -= off;  // This modifies transform_2
+
+bool valid = t2(0) >= 0 && t2(1) >= 0 && t2(0) < pyr_2.lvl(0).w &&
+             t2(1) < pyr_2.lvl(0).h;
+if (!valid) continue;
+
 const Eigen::aligned_vector<PatchT>& patch_vec = patches.at(id);
-bool valid = trackPoint(pyr_2, patch_vec, transform_2);
-if (valid) {
-  Eigen::AffineCompact2f transform_1_recovered = transform_2;
-  valid = trackPoint(pyr_1, patch_vec, transform_1_recovered);
-  if (valid) {
-    Scalar dist2 = (transform_1.translation() -
-                    transform_1_recovered.translation())
-                       .squaredNorm();
-    if (dist2 < config.optical_flow_max_recovered_dist2) {
-      result[id] = transform_2;
-    }
-  }
+valid = trackPoint(pyr_2, patch_vec, transform_2);
+if (!valid) continue;
+
+Eigen::AffineCompact2f transform_1_recovered = transform_2;
+auto t1_recovered = transform_1_recovered.translation();
+t1_recovered += off;
+valid = trackPoint(pyr_1, patch_vec, transform_1_recovered);
+if (!valid) continue;
+
+Scalar dist2 = (t1 - t1_recovered).squaredNorm();
+
+if (dist2 < config.optical_flow_max_recovered_dist2) {
+  result[id] = transform_2;
 }
 }
 };

@@ -334,7 +355,7 @@ class PatchOpticalFlow : public OpticalFlowBase {
 }
 if (calib.intrinsics.size() > 1) {
-trackPoints(pyramid->at(0), pyramid->at(1), new_poses0, new_poses1);
+trackPoints(pyramid->at(0), pyramid->at(1), new_poses0, new_poses1, true);
 for (const auto& kv : new_poses1) {
 transforms->observations.at(1).emplace(kv);

@@ -348,19 +369,19 @@ class PatchOpticalFlow : public OpticalFlowBase {
 std::set<KeypointId> lm_to_remove;
 std::vector<KeypointId> kpid;
-Eigen::aligned_vector<Eigen::Vector2d> proj0, proj1;
+Eigen::aligned_vector<Eigen::Vector2f> proj0, proj1;
 for (const auto& kv : transforms->observations.at(1)) {
 auto it = transforms->observations.at(0).find(kv.first);
 if (it != transforms->observations.at(0).end()) {
-proj0.emplace_back(it->second.translation().cast<double>());
-proj1.emplace_back(kv.second.translation().cast<double>());
+proj0.emplace_back(it->second.translation());
+proj1.emplace_back(kv.second.translation());
 kpid.emplace_back(kv.first);
 }
 }
-Eigen::aligned_vector<Eigen::Vector4d> p3d0, p3d1;
+Eigen::aligned_vector<Eigen::Vector4f> p3d0, p3d1;
 std::vector<bool> p3d0_success, p3d1_success;
 calib.intrinsics[0].unproject(proj0, p3d0, p3d0_success);

@@ -393,7 +414,7 @@ class PatchOpticalFlow : public OpticalFlowBase {
 KeypointId last_keypoint_id;
 VioConfig config;
-basalt::Calibration<double> calib;
+basalt::Calibration<Scalar> calib;
 Eigen::aligned_unordered_map<KeypointId, Eigen::aligned_vector<PatchT>>
     patches;

@@ -402,7 +423,7 @@ class PatchOpticalFlow : public OpticalFlowBase {
 std::shared_ptr<std::vector<basalt::ManagedImagePyr<uint16_t>>> old_pyramid,
     pyramid;
-Eigen::Matrix4d E;
+Matrix4 E;
 std::shared_ptr<std::thread> processing_thread;
 };

View File

@@ -39,6 +39,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 namespace basalt {
 enum class LinearizationType { ABS_QR, ABS_SC, REL_SC };
+enum class MatchingGuessType { SAME_PIXEL, REPROJ_FIX_DEPTH, REPROJ_AVG_DEPTH };
 struct VioConfig {
 VioConfig();

@@ -53,6 +54,8 @@ struct VioConfig {
 int optical_flow_levels;
 float optical_flow_epipolar_error;
 int optical_flow_skip_frames;
+MatchingGuessType optical_flow_matching_guess_type;
+float optical_flow_matching_default_depth;
 LinearizationType vio_linearization_type;
 bool vio_sqrt_marg;

View File

@@ -113,7 +113,8 @@ class SqrtKeypointVoEstimator : public VioEstimatorBase,
 // int64_t propagate();
 // void addNewState(int64_t data_t_ns);
-void optimize_and_marg(const std::map<int64_t, int>& num_points_connected,
+void optimize_and_marg(const OpticalFlowInput::Ptr& input_images,
+                       const std::map<int64_t, int>& num_points_connected,
                        const std::unordered_set<KeypointId>& lost_landmaks);
 void marginalize(const std::map<int64_t, int>& num_points_connected,

View File

@@ -54,7 +54,8 @@ struct VioVisualizationData {
 OpticalFlowResult::Ptr opt_flow_res;
-std::vector<Eigen::aligned_vector<Eigen::Vector4d>> projections;
+std::shared_ptr<std::vector<Eigen::aligned_vector<Eigen::Vector4d>>>
+    projections;
 EIGEN_MAKE_ALIGNED_OPERATOR_NEW
 };

@@ -85,6 +86,8 @@ class VioEstimatorBase {
 tbb::concurrent_bounded_queue<VioVisualizationData::Ptr>* out_vis_queue =
     nullptr;
+tbb::concurrent_queue<double>* opt_flow_depth_guess_queue = nullptr;
 virtual void initialize(int64_t t_ns, const Sophus::SE3d& T_w_i,
                         const Eigen::Vector3d& vel_w_i,
                         const Eigen::Vector3d& bg,

View File

@@ -111,8 +111,7 @@ calib_template = Template('''{
 "gyro_noise_std": [0.0, 0.0, 0.0],
 "accel_bias_std": [0.0, 0.0, 0.0],
 "gyro_bias_std": [0.0, 0.0, 0.0],
-"cam_time_offset_ns": 0,
-"view_offset": [0.0, 0.0]
+"cam_time_offset_ns": 0
 }
}
''')

View File

@@ -439,8 +439,8 @@ void draw_image_overlay(pangolin::View& v, size_t cam_id) {
 auto it = vis_map.find(gt_frame_t_ns[frame_id]);
-if (it != vis_map.end() && cam_id < it->second->projections.size()) {
-  const auto& points = it->second->projections[cam_id];
+if (it != vis_map.end() && cam_id < it->second->projections->size()) {
+  const auto& points = it->second->projections->at(cam_id);
 if (points.size() > 0) {
 double min_id = points[0][2], max_id = points[0][2];

View File

@@ -252,6 +252,7 @@ struct slam_tracker::implementation {
 while (running) {
 cout << "[in] frames: " << image_data_queue->size() << "/" << image_data_queue->capacity() << " \t"
      << "[in] imu: " << imu_data_queue->size() << "/" << imu_data_queue->capacity() << " \t"
+     << "[in] depth: " << opt_flow_ptr->input_depth_queue.unsafe_size() << "/-- \t"
      << "[mid] keypoints: " << opt_flow_ptr->output_queue->size() << "/" << opt_flow_ptr->output_queue->capacity()
      << " \t"
      << "[out] pose: " << out_state_queue.size() << "/" << out_state_queue.capacity() << "\n";

@@ -274,13 +275,6 @@ struct slam_tracker::implementation {
 apply_cam_calibration(c);
 }
-
-bool calib_from_monado = added_cam_calibs.size() == NUM_CAMS;
-bool view_offset_unknown = calib.view_offset(0) == 0 && calib.view_offset(1) == 0;
-if (calib_from_monado || view_offset_unknown) {
-  compute_view_offset();
-  cout << "Computed view_offset = " << calib.view_offset.transpose() << "\n";
-}
 // Overwrite IMU calibration data
 for (const auto &c : added_imu_calibs) {
 apply_imu_calibration(c);

@@ -305,6 +299,7 @@ struct slam_tracker::implementation {
 vio->out_vis_queue = &ui.out_vis_queue;
 };
 vio->out_state_queue = &out_state_queue;
+vio->opt_flow_depth_guess_queue = &opt_flow_ptr->input_depth_queue;
 if (!marg_data_path.empty()) {
 marg_data_saver.reset(new MargDataSaver(marg_data_path));

@@ -316,7 +311,7 @@ struct slam_tracker::implementation {
 running = true;
 vio->initialize(Eigen::Vector3d::Zero(), Eigen::Vector3d::Zero());
-if (show_gui) ui.start(vio->getT_w_i_init(), calib);
+if (show_gui) ui.start(vio->getT_w_i_init(), calib, vio_config, &opt_flow_ptr->input_depth_queue, opt_flow_ptr);
 state_consumer_thread = thread(&slam_tracker::implementation::state_consumer, this);
 if (print_queue) queues_printer_thread = thread(&slam_tracker::implementation::queues_printer, this);
 }

@@ -441,22 +436,6 @@ struct slam_tracker::implementation {
 return true;
 }
-
-void compute_view_offset() {
-  constexpr double DISTANCE_TO_WALL = 2;  // In meters
-
-  double width = calib.resolution[0][0];
-  double height = calib.resolution[0][1];
-  Sophus::SE3d T_i_c0 = calib.T_i_c[0];
-  Sophus::SE3d T_i_c1 = calib.T_i_c[1];
-  Sophus::SE3d T_c1_i = T_i_c1.inverse();
-  Sophus::SE3d T_c1_c0 = T_c1_i * T_i_c0;  // Maps a point in c0 space to c1 space
-  Eigen::Vector4d p3d{0, 0, DISTANCE_TO_WALL, 1};
-  Eigen::Vector4d p3d_in_c1 = T_c1_c0 * p3d;
-  Eigen::Vector2d p2d;
-  calib.intrinsics[1].project(p3d_in_c1, p2d);
-  calib.view_offset.x() = (width / 2) - p2d.x();
-  calib.view_offset.y() = (height / 2) - p2d.y();
-}
 void add_cam_calibration(const cam_calibration &cam_calib) { added_cam_calibs.push_back(cam_calib); }
 void apply_cam_calibration(const cam_calibration &cam_calib) {
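The deleted compute_view_offset above is the degenerate case of the new mechanism: a single offset for the whole image, evaluated at the optical axis with a hard-coded 2 m "wall". In terms of the hypothetical view_offset_guess sketch at the top of this commit, it is roughly:

```cpp
// One static offset, computed once at startup (old behavior); cam0, cam1,
// T_c1_c0, width, and height are the calibration quantities used above.
Eigen::Vector2d center{width / 2.0, height / 2.0};
Eigen::Vector2d static_off =
    view_offset_guess(cam0, cam1, T_c1_c0, center, /*depth=*/2.0);

// The commit instead evaluates calib.viewOffset(t1, depth_guess) for every
// keypoint t1, with depth_guess refreshed each frame by the backend.
```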

View File

@@ -5,10 +5,13 @@
 #include <CLI/CLI.hpp>
+#include <cstddef>
 #include <cstdio>
 #include <memory>
 #include <string>
 #include <thread>
+#include "basalt/optical_flow/optical_flow.h"
+#include "basalt/utils/vio_config.h"
 #include "sophus/se3.hpp"
 #include <basalt/io/marg_data_io.h>

@@ -51,6 +54,8 @@ class slam_tracker_ui {
 public:
 tbb::concurrent_bounded_queue<VioVisualizationData::Ptr> out_vis_queue{};
+tbb::concurrent_queue<double> *opt_flow_depth_queue = nullptr;
+OpticalFlowBase::Ptr opt_flow;
 void initialize(int ncams) {
 vio_data_log.Clear();

@@ -58,10 +63,13 @@ class slam_tracker_ui {
 num_cams = ncams;
 }
-void start(const Sophus::SE3d &T_w_i_init, const basalt::Calibration<double> &calib) {
+void start(const Sophus::SE3d &T_w_i_init, const Calibration<double> &calib, const VioConfig &config,
+           tbb::concurrent_queue<double> *ofq_depth, OpticalFlowBase::Ptr of) {
+  opt_flow_depth_queue = ofq_depth;
+  opt_flow = of;
 running = true;
 start_visualization_thread();
-start_ui(T_w_i_init, calib);
+start_ui(T_w_i_init, calib, config);
 }

@@ -74,6 +82,7 @@ class slam_tracker_ui {
 vis_thread = thread([&]() {
 while (true) {
 out_vis_queue.pop(curr_vis_data);
+show_frame = show_frame + 1;
 if (curr_vis_data.get() == nullptr) break;
 }
 cout << "Finished vis_thread\n";

@@ -109,9 +118,10 @@ class slam_tracker_ui {
 pangolin::Plotter *plotter;
 pangolin::DataLog imu_data_log{};
-basalt::Calibration<double> calib;
-void start_ui(const Sophus::SE3d &T_w_i_init, const basalt::Calibration<double> &c) {
-  ui_runner_thread = thread(&slam_tracker_ui::ui_runner, this, T_w_i_init, c);
+Calibration<double> calib;
+VioConfig config;
+void start_ui(const Sophus::SE3d &T_w_i_init, const Calibration<double> &cal, const VioConfig &conf) {
+  ui_runner_thread = thread(&slam_tracker_ui::ui_runner, this, T_w_i_init, cal, conf);
 }

@@ -120,11 +130,12 @@ class slam_tracker_ui {
 pangolin::Var<bool> show_est_bg{"ui.show_est_bg", false, false, true};
 pangolin::Var<bool> show_est_ba{"ui.show_est_ba", false, false, true};
-void ui_runner(const Sophus::SE3d &T_w_i_init, const basalt::Calibration<double> &c) {
+void ui_runner(const Sophus::SE3d &T_w_i_init, const Calibration<double> &cal, const VioConfig &conf) {
 constexpr int UI_WIDTH = 200;
-calib = c;
-string window_name = "Basalt SLAM Tracker for Monado";
+calib = cal;
+config = conf;
+string window_name = "Basalt 6DoF Tracker for Monado";
 pangolin::CreateWindowAndBind(window_name, 1800, 1000);
 glEnable(GL_DEPTH_TEST);

@@ -143,6 +154,7 @@ class slam_tracker_ui {
 std::vector<std::shared_ptr<pangolin::ImageView>> img_view;
 while (img_view.size() < calib.intrinsics.size()) {
 std::shared_ptr<pangolin::ImageView> iv(new pangolin::ImageView);
+iv->UseNN() = true;  // Disable antialiasing (toggle it back with the N key)
 size_t idx = img_view.size();
 img_view.push_back(iv);

@@ -185,6 +197,11 @@ class slam_tracker_ui {
 img_view_display.Activate();
+
+if (fixed_depth.GuiChanged() && opt_flow_depth_queue != nullptr) {
+  opt_flow_depth_queue->push(fixed_depth);
+}
+depth_guess = opt_flow->depth_guess;
 {
 pangolin::GlPixFormat fmt;
 fmt.glformat = GL_LUMINANCE;

@@ -217,8 +234,19 @@ class slam_tracker_ui {
 cout << "Finished ui_runner\n";
 }
+pangolin::Var<int> show_frame{"ui.show_frame", 0, pangolin::META_FLAG_READONLY};
 pangolin::Var<bool> show_obs{"ui.show_obs", true, false, true};
 pangolin::Var<bool> show_ids{"ui.show_ids", false, false, true};
+pangolin::Var<bool> show_invdist{"ui.show_invdist", false, false, true};
+
+pangolin::Var<bool> show_guesses{"ui.Show matching guesses", false, false, true};
+pangolin::Var<bool> show_same_pixel_guess{"ui.SAME_PIXEL", true, false, true};
+pangolin::Var<bool> show_reproj_avg_depth_guess{"ui.REPROJ_AVG_DEPTH", true, false, true};
+pangolin::Var<bool> show_reproj_fix_depth_guess{"ui.REPROJ_FIX_DEPTH", true, false, true};
+pangolin::Var<double> fixed_depth{"ui.FIX_DEPTH", 2, 0, 10};
+pangolin::Var<bool> show_active_guess{"ui.Active Guess", true, false, true};
+
+pangolin::Var<double> depth_guess{"ui.depth_guess", 2, pangolin::META_FLAG_READONLY};
 void draw_image_overlay(pangolin::View &v, size_t cam_id) {
 UNUSED(v);

@@ -229,14 +257,14 @@ class slam_tracker_ui {
 glEnable(GL_BLEND);
 glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
-if (curr_vis_data && cam_id < curr_vis_data->projections.size()) {
-  const auto &points = curr_vis_data->projections[cam_id];
+if (curr_vis_data && cam_id < curr_vis_data->projections->size()) {
+  const auto &points = curr_vis_data->projections->at(cam_id);
 if (!points.empty()) {
 double min_id = points[0][2];
 double max_id = points[0][2];
-for (const auto &points2 : curr_vis_data->projections) {
+for (const auto &points2 : *curr_vis_data->projections) {
 for (const auto &p : points2) {
 min_id = std::min(min_id, p[2]);
 max_id = std::max(max_id, p[2]);

@@ -255,6 +283,70 @@ class slam_tracker_ui {
 pangolin::glDrawCirclePerimeter(c[0], c[1], radius);
 if (show_ids) pangolin::GlFont::I().Text("%d", int(c[3])).Draw(c[0], c[1]);
+if (show_invdist) pangolin::GlFont::I().Text("%.3lf", c[2]).Draw(c[0], c[1] + 5);
+}
+}
+
+if (show_guesses && cam_id == 1) {
+  using namespace Eigen;
+
+  const auto keypoints0 = curr_vis_data->projections->at(0);
+  const auto keypoints1 = curr_vis_data->projections->at(1);
+
+  double avg_invdepth = 0;
+  double num_features = 0;
+  for (const auto &cam_projs : *curr_vis_data->projections) {
+    for (const Vector4d &v : cam_projs) avg_invdepth += v.z();
+    num_features += cam_projs.size();
+  }
+  bool valid = avg_invdepth > 0 && num_features > 0;
+  float default_depth = config.optical_flow_matching_default_depth;
+  double avg_depth = valid ? num_features / avg_invdepth : default_depth;
+
+  for (const Vector4d kp1 : keypoints1) {
+    double u1 = kp1.x();
+    double v1 = kp1.y();
+    // double invdist1 = kp1.z();
+    double id1 = kp1.w();
+
+    double u0 = 0;
+    double v0 = 0;
+    bool found = false;
+    for (const Vector4d &kp0 : keypoints0) {
+      double id0 = kp0.w();
+      if (id1 != id0) continue;
+      u0 = kp0.x();
+      v0 = kp0.y();
+      found = true;
+      break;
+    }
+
+    if (found) {
+      if (show_same_pixel_guess) {
+        glColor3f(0, 1, 1);  // Cyan
+        pangolin::glDrawLine(u1, v1, u0, v0);
+      }
+      if (show_reproj_fix_depth_guess) {
+        glColor3f(1, 1, 0);  // Yellow
+        auto off = calib.viewOffset({u0, v0}, fixed_depth);
+        pangolin::glDrawLine(u1, v1, u0 - off.x(), v0 - off.y());
+      }
+      if (show_reproj_avg_depth_guess) {
+        glColor3f(1, 0, 1);  // Magenta
+        auto off = calib.viewOffset({u0, v0}, avg_depth);
+        pangolin::glDrawLine(u1, v1, u0 - off.x(), v0 - off.y());
+      }
+      if (show_active_guess) {
+        glColor3f(1, 0, 0);  // Red
+        Vector2d off{0, 0};
+        if (config.optical_flow_matching_guess_type != MatchingGuessType::SAME_PIXEL) {
+          off = calib.viewOffset({u0, v0}, opt_flow->depth_guess);
+        }
+        pangolin::glDrawLine(u1, v1, u0 - off.x(), v0 - off.y());
+      }
+    }
+  }
+}
 }
 }

View File

@@ -199,6 +199,7 @@ int main(int argc, char** argv) {
 opt_flow_ptr->output_queue = &vio->vision_data_queue;
 if (show_gui) vio->out_vis_queue = &out_vis_queue;
 vio->out_state_queue = &out_state_queue;
+vio->opt_flow_depth_guess_queue = &opt_flow_ptr->input_depth_queue;
 basalt::MargDataSaver::Ptr marg_data_saver;

View File

@@ -54,6 +54,8 @@ VioConfig::VioConfig() {
 optical_flow_levels = 3;
 optical_flow_epipolar_error = 0.005;
 optical_flow_skip_frames = 1;
+optical_flow_matching_guess_type = MatchingGuessType::REPROJ_AVG_DEPTH;
+optical_flow_matching_default_depth = 2.0;
 vio_linearization_type = LinearizationType::ABS_QR;
 vio_sqrt_marg = true;

@@ -134,6 +136,30 @@ void VioConfig::load(const std::string& filename) {
 namespace cereal {
+template <class Archive>
+std::string save_minimal(const Archive& ar,
+                         const basalt::MatchingGuessType& guess_type) {
+  UNUSED(ar);
+  auto name = magic_enum::enum_name(guess_type);
+  return std::string(name);
+}
+
+template <class Archive>
+void load_minimal(const Archive& ar, basalt::MatchingGuessType& guess_type,
+                  const std::string& name) {
+  UNUSED(ar);
+  auto guess_enum = magic_enum::enum_cast<basalt::MatchingGuessType>(name);
+  if (guess_enum.has_value()) {
+    guess_type = guess_enum.value();
+  } else {
+    std::cerr << "Could not find the MatchingGuessType for " << name
+              << std::endl;
+    std::abort();
+  }
+}
+
 template <class Archive>
 std::string save_minimal(const Archive& ar,
                          const basalt::LinearizationType& linearization_type) {

@@ -169,6 +195,8 @@ void serialize(Archive& ar, basalt::VioConfig& config) {
 ar(CEREAL_NVP(config.optical_flow_epipolar_error));
 ar(CEREAL_NVP(config.optical_flow_levels));
 ar(CEREAL_NVP(config.optical_flow_skip_frames));
+ar(CEREAL_NVP(config.optical_flow_matching_guess_type));
+ar(CEREAL_NVP(config.optical_flow_matching_default_depth));
 ar(CEREAL_NVP(config.vio_linearization_type));
 ar(CEREAL_NVP(config.vio_sqrt_marg));
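The new save_minimal/load_minimal overloads round-trip MatchingGuessType through its identifier string with magic_enum, so config files spell the value exactly as the enumerator ("REPROJ_AVG_DEPTH", etc.). A self-contained check of the two conversions the overloads rely on:

```cpp
#include <magic_enum.hpp>

#include <cassert>
#include <string>

enum class MatchingGuessType { SAME_PIXEL, REPROJ_FIX_DEPTH, REPROJ_AVG_DEPTH };

int main() {
  // enum -> string, as save_minimal does.
  std::string name{magic_enum::enum_name(MatchingGuessType::REPROJ_AVG_DEPTH)};
  assert(name == "REPROJ_AVG_DEPTH");

  // string -> enum, as load_minimal does; enum_cast returns std::optional.
  auto parsed = magic_enum::enum_cast<MatchingGuessType>("SAME_PIXEL");
  assert(parsed.has_value() && parsed.value() == MatchingGuessType::SAME_PIXEL);

  // Unknown names yield an empty optional; load_minimal aborts in that case.
  assert(!magic_enum::enum_cast<MatchingGuessType>("TYPO").has_value());
  return 0;
}
```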

View File

@@ -36,6 +36,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #include <basalt/vi_estimator/marg_helper.h>
 #include <basalt/vi_estimator/sqrt_keypoint_vio.h>
+#include <basalt/optical_flow/optical_flow.h>
 #include <basalt/optimization/accumulator.h>
 #include <basalt/utils/assert.h>
 #include <basalt/utils/system_utils.h>

@@ -487,12 +488,17 @@ bool SqrtKeypointVioEstimator<Scalar_>::measure(
 optimize_and_marg(opt_flow_meas->input_images, num_points_connected,
                   lost_landmaks);
-const size_t num_cams = opt_flow_meas->observations.size();
-const bool features_ext = opt_flow_meas->input_images->stats.features_enabled;
-std::vector<Eigen::aligned_vector<Eigen::Vector4d>> projections{};
-if (features_ext || out_vis_queue) {
-  projections.resize(num_cams);
-  computeProjections(projections, last_state_t_ns);
+size_t num_cams = opt_flow_meas->observations.size();
+bool features_ext = opt_flow_meas->input_images->stats.features_enabled;
+bool avg_depth_needed =
+    opt_flow_depth_guess_queue && config.optical_flow_matching_guess_type ==
+                                      MatchingGuessType::REPROJ_AVG_DEPTH;
+
+using Projections = std::vector<Eigen::aligned_vector<Eigen::Vector4d>>;
+std::shared_ptr<Projections> projections = nullptr;
+if (features_ext || out_vis_queue || avg_depth_needed) {
+  projections = std::make_shared<Projections>(num_cams);
+  computeProjections(*projections, last_state_t_ns);
 }
 if (out_state_queue) {

@@ -504,9 +510,24 @@ bool SqrtKeypointVioEstimator<Scalar_>::measure(
 data->input_images = opt_flow_meas->input_images;
 data->input_images->addTime("pose_produced");
+
+if (avg_depth_needed) {
+  double avg_invdepth = 0;
+  double num_features = 0;
+  for (const auto& cam_projs : *projections) {
+    for (const Eigen::Vector4d& v : cam_projs) avg_invdepth += v.z();
+    num_features += cam_projs.size();
+  }
+  bool valid = avg_invdepth > 0 && num_features > 0;
+  float default_depth = config.optical_flow_matching_default_depth;
+  double avg_depth = valid ? num_features / avg_invdepth : default_depth;
+  opt_flow_depth_guess_queue->push(avg_depth);
+}
+
 if (features_ext) {
 for (size_t i = 0; i < num_cams; i++) {
-for (const Eigen::Vector4d& v : projections[i]) {
+for (const Eigen::Vector4d& v : projections->at(i)) {
 using Feature =
     xrt::auxiliary::tracking::slam::pose_ext_features_data::feature;
 Feature lm{};
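The avg_depth block above turns the landmark map into a single scalar guess: projections store inverse distance in .z(), so num_features / avg_invdepth is the reciprocal of the mean inverse depth, i.e. the harmonic mean of the landmark depths. A standalone worked example of the same arithmetic:

```cpp
#include <vector>

int main() {
  std::vector<double> depths{1.0, 2.0, 4.0};  // hypothetical landmark depths

  double avg_invdepth = 0;
  double num_features = 0;
  for (double d : depths) avg_invdepth += 1.0 / d;  // 1 + 0.5 + 0.25 = 1.75
  num_features = depths.size();                     // 3

  bool valid = avg_invdepth > 0 && num_features > 0;
  double default_depth = 2.0;  // optical_flow_matching_default_depth
  double avg_depth = valid ? num_features / avg_invdepth : default_depth;
  // avg_depth == 3 / 1.75, about 1.71 m; with no landmarks it falls back to 2 m.
  return avg_depth > 0 ? 0 : 1;
}
```

The harmonic mean is a sensible choice here: it is dominated by near landmarks, which produce the largest inter-view pixel offsets and therefore benefit most from a good guess.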

View File

@@ -52,6 +52,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #include <fmt/format.h>
 #include <chrono>
+#include <slam_tracker.hpp>

 namespace basalt {

@@ -139,6 +140,8 @@ void SqrtKeypointVoEstimator<Scalar_>::initialize(const Eigen::Vector3d& bg,
 if (!curr_frame.get()) {
 break;
 }
+curr_frame->input_images->addTime("vio_start");
+curr_frame->input_images->addTime("imu_preintegrated");
 // Correct camera time offset (relevant for VIO)
 // curr_frame->t_ns += calib.cam_time_offset_ns;

@@ -408,8 +411,23 @@ bool SqrtKeypointVoEstimator<Scalar_>::measure(
 }
 }
 }
+opt_flow_meas->input_images->addTime("landmarks_updated");
-optimize_and_marg(num_points_connected, lost_landmaks);
+optimize_and_marg(opt_flow_meas->input_images, num_points_connected,
+                  lost_landmaks);
+
+size_t num_cams = opt_flow_meas->observations.size();
+bool features_ext = opt_flow_meas->input_images->stats.features_enabled;
+bool avg_depth_needed =
+    opt_flow_depth_guess_queue && config.optical_flow_matching_guess_type ==
+                                      MatchingGuessType::REPROJ_AVG_DEPTH;
+
+using Projections = std::vector<Eigen::aligned_vector<Eigen::Vector4d>>;
+std::shared_ptr<Projections> projections = nullptr;
+if (features_ext || out_vis_queue || avg_depth_needed) {
+  projections = std::make_shared<Projections>(num_cams);
+  computeProjections(*projections, last_state_t_ns);
+}
 if (out_state_queue) {
 const PoseStateWithLin<Scalar>& p = frame_poses.at(last_state_t_ns);

@@ -419,6 +437,38 @@ bool SqrtKeypointVoEstimator<Scalar_>::measure(
 Eigen::Vector3d::Zero(), Eigen::Vector3d::Zero(),
 Eigen::Vector3d::Zero()));
+
+data->input_images = opt_flow_meas->input_images;
+data->input_images->addTime("pose_produced");
+
+if (avg_depth_needed) {
+  double avg_invdepth = 0;
+  double num_features = 0;
+  for (const auto& cam_projs : *projections) {
+    for (const Eigen::Vector4d& v : cam_projs) avg_invdepth += v.z();
+    num_features += cam_projs.size();
+  }
+  bool valid = avg_invdepth > 0 && num_features > 0;
+  float default_depth = config.optical_flow_matching_default_depth;
+  double avg_depth = valid ? num_features / avg_invdepth : default_depth;
+  opt_flow_depth_guess_queue->push(avg_depth);
+}
+
+if (features_ext) {
+  for (size_t i = 0; i < num_cams; i++) {
+    for (const Eigen::Vector4d& v : projections->at(i)) {
+      using Feature =
+          xrt::auxiliary::tracking::slam::pose_ext_features_data::feature;
+      Feature lm{};
+      lm.id = v.w();
+      lm.u = v.x();
+      lm.v = v.y();
+      lm.depth = v.z();
+      data->input_images->stats.addFeature(i, lm);
+    }
+  }
+}
+
 out_state_queue->push(data);
 }

@@ -435,8 +485,7 @@ bool SqrtKeypointVoEstimator<Scalar_>::measure(
 get_current_points(data->points, data->point_ids);
-data->projections.resize(opt_flow_meas->observations.size());
-computeProjections(data->projections, last_state_t_ns);
+data->projections = projections;
 data->opt_flow_res = prev_opt_flow_res[last_state_t_ns];

@@ -1309,10 +1358,13 @@ void SqrtKeypointVoEstimator<Scalar_>::optimize() {
 template <class Scalar_>
 void SqrtKeypointVoEstimator<Scalar_>::optimize_and_marg(
+    const OpticalFlowInput::Ptr& input_images,
     const std::map<int64_t, int>& num_points_connected,
     const std::unordered_set<KeypointId>& lost_landmaks) {
 optimize();
+input_images->addTime("optimized");
 marginalize(num_points_connected, lost_landmaks);
+input_images->addTime("marginalized");
 }
template <class Scalar_> template <class Scalar_>

View File

@@ -95,6 +95,12 @@ pangolin::Var<int> show_frame("ui.show_frame", 0, 0, 1500);
 pangolin::Var<bool> show_flow("ui.show_flow", false, false, true);
 pangolin::Var<bool> show_obs("ui.show_obs", true, false, true);
 pangolin::Var<bool> show_ids("ui.show_ids", false, false, true);
+pangolin::Var<bool> show_same_pixel_guess("ui.show_same_pixel_guess", false,
+                                          false, true);
+pangolin::Var<bool> show_active_guess("ui.show_active_guess", false, false,
+                                      true);
+pangolin::Var<double> depth_guess{"ui.depth_guess", 2,
+                                  pangolin::META_FLAG_READONLY};
 pangolin::Var<bool> show_est_pos("ui.show_est_pos", true, false, true);
 pangolin::Var<bool> show_est_vel("ui.show_est_vel", false, false, true);

@@ -310,6 +316,7 @@ int main(int argc, char** argv) {
 opt_flow_ptr->output_queue = &vio->vision_data_queue;
 if (show_gui) vio->out_vis_queue = &out_vis_queue;
 vio->out_state_queue = &out_state_queue;
+vio->opt_flow_depth_guess_queue = &opt_flow_ptr->input_depth_queue;
 }
 basalt::MargDataSaver::Ptr marg_data_saver;

@@ -442,6 +449,7 @@ int main(int argc, char** argv) {
 std::vector<std::shared_ptr<pangolin::ImageView>> img_view;
 while (img_view.size() < calib.intrinsics.size()) {
 std::shared_ptr<pangolin::ImageView> iv(new pangolin::ImageView);
+iv->UseNN() = true;  // Disable antialiasing, can be toggled with N key
 size_t idx = img_view.size();
 img_view.push_back(iv);

@@ -693,13 +701,13 @@ void draw_image_overlay(pangolin::View& v, size_t cam_id) {
 glEnable(GL_BLEND);
 glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
-if (it != vis_map.end() && cam_id < it->second->projections.size()) {
-  const auto& points = it->second->projections[cam_id];
+if (it != vis_map.end() && cam_id < it->second->projections->size()) {
+  const auto& points = it->second->projections->at(cam_id);
 if (points.size() > 0) {
 double min_id = points[0][2], max_id = points[0][2];
-for (const auto& points2 : it->second->projections)
+for (const auto& points2 : *it->second->projections)
 for (const auto& p : points2) {
 min_id = std::min(min_id, p[2]);
 max_id = std::max(max_id, p[2]);

@@ -726,6 +734,58 @@ void draw_image_overlay(pangolin::View& v, size_t cam_id) {
 }
 }
+depth_guess = it->second->opt_flow_res->input_images->depth_guess;
+
+bool show_guesses = (show_active_guess || show_same_pixel_guess) &&
+                    cam_id == 1 && it != vis_map.end() &&
+                    it->second->projections->size() >= 2;
+if (show_guesses) {
+  const auto keypoints0 = it->second->projections->at(0);
+  const auto keypoints1 = it->second->projections->at(1);
+  for (const Eigen::Vector4d& kp1 : keypoints1) {
+    double u1 = kp1.x();
+    double v1 = kp1.y();
+    double id1 = kp1.w();
+
+    // Find match in keypoints0
+    double u0 = 0;
+    double v0 = 0;
+    bool found = false;
+    for (const Eigen::Vector4d& kp0 : keypoints0) {
+      double id0 = kp0.w();
+      if (id1 != id0) continue;
+      u0 = kp0.x();
+      v0 = kp0.y();
+      found = true;
+      break;
+    }
+
+    // Display guess error if this is a stereo feature
+    // NOTE: keep in mind that these guesses are not really the guesses
+    // used to detect the feature, but the guess we would use if we were
+    // to detect the feature right now.
+    if (found) {
+      // Guess if we were using SAME_PIXEL
+      if (show_same_pixel_guess) {
+        glColor3f(0, 1, 1);  // Cyan
+        pangolin::glDrawLine(u1, v1, u0, v0);
+      }
+
+      // Guess with the current guess type
+      if (show_active_guess) {
+        glColor3f(1, 0, 1);  // Magenta
+        Eigen::Vector2d off{0, 0};
+        if (vio_config.optical_flow_matching_guess_type !=
+            basalt::MatchingGuessType::SAME_PIXEL) {
+          off = calib.viewOffset({u0, v0}, depth_guess);
+        }
+        pangolin::glDrawLine(u1, v1, u0 - off.x(), v0 - off.y());
+      }
+    }
+  }
+}
+
 if (show_flow) {
 glLineWidth(1.0);
 glColor3f(1.0, 0.0, 0.0);

View File

@@ -482,8 +482,8 @@ void draw_image_overlay(pangolin::View& v, size_t cam_id) {
 auto it = vis_map.find(gt_frame_t_ns[frame_id]);
-if (it != vis_map.end() && cam_id < it->second->projections.size()) {
-  const auto& points = it->second->projections[cam_id];
+if (it != vis_map.end() && cam_id < it->second->projections->size()) {
+  const auto& points = it->second->projections->at(cam_id);
 if (points.size() > 0) {
 double min_id = points[0][2], max_id = points[0][2];

@@ -1 +1 @@
-Subproject commit 1a92e765bd65037313bb482c56886842c4d3d547
+Subproject commit ff99f3d040fb6aeb3535338ba69c6300191c9bda