Add slam_tracker adapter for Monado

This adapter took many steps to build, but they were squashed in this branch.
For a closer look at the development history of the tracker, see the branch mateosss/xrtslam-raw.
Mateo de Mayo 2022-05-06 16:08:16 -03:00
parent b90e7e87fc
commit d3eff0d6c6
8 changed files with 1189 additions and 2 deletions

CMakeLists.txt

@@ -276,6 +276,10 @@ message(STATUS "Found {fmt} ${fmt_VERSION} in: ${fmt_DIR}")
add_subdirectory(thirdparty)
# custom scoped monado target
add_library(basalt::monado INTERFACE IMPORTED)
set_property(TARGET basalt::monado PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${CMAKE_CURRENT_SOURCE_DIR}/thirdparty/monado)
# custom scoped cli11 target
add_library(basalt::cli11 INTERFACE IMPORTED)
set_property(TARGET basalt::cli11 PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${CMAKE_CURRENT_SOURCE_DIR}/thirdparty/CLI11/include)
@@ -361,6 +365,7 @@ target_sources(basalt
${CMAKE_CURRENT_SOURCE_DIR}/src/linearization/linearization_abs_sc.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/linearization/linearization_base.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/linearization/linearization_rel_sc.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/monado/slam_tracker.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/optical_flow/optical_flow.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/utils/keypoints.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/utils/system_utils.cpp
@@ -378,8 +383,8 @@ target_sources(basalt
)
target_link_libraries(basalt
PUBLIC ${STD_CXX_FS} basalt::opencv basalt::basalt-headers TBB::tbb pangolin basalt::cli11
PRIVATE basalt::magic_enum rosbag apriltag opengv nlohmann::json fmt::fmt basalt::monado)
target_include_directories(basalt PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include)
target_compile_definitions(basalt PUBLIC ${BASALT_COMPILE_DEFINITIONS})
#target_compile_definitions(basalt PUBLIC BASALT_DISABLE_ASSERTS)

data/monado/euroc.toml Normal file

@@ -0,0 +1,19 @@
# Options for the Basalt SLAM Tracker
# Show GUI
show-gui=1
# Ground-truth camera calibration used for simulation.
cam-calib="/home/mateo/Documents/apps/bsltdeps/basalt/data/euroc_ds_calib.json"
# Path to config file.
config-path="/home/mateo/Documents/apps/bsltdeps/basalt/data/euroc_config.json"
# Path to folder where marginalization data will be stored.
marg-data=""
# Poll and print for queue sizes.
print-queue=0
# Whether to use a double or single precision pipeline.
use-double=0
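These unified config files are what the adapter's constructor takes as its single argument; each key mirrors a CLI11 option registered in load_unified_config() in slam_tracker.cpp below. A minimal sketch of the consumer side (the path here is illustrative, not from the commit):

#include "slam_tracker.hpp"
using xrt::auxiliary::tracking::slam::slam_tracker;

int main() {
  // The TOML above is parsed as if its keys were command-line flags
  slam_tracker tracker("data/monado/euroc.toml");
  tracker.initialize();  // also starts the optical flow (see initialize())
  tracker.start();
  // ... push samples, dequeue poses ...
  tracker.stop();
}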

data/monado/tumvi.toml Normal file

@@ -0,0 +1,19 @@
# Options for the Basalt SLAM Tracker
# Show GUI
show-gui=1
# Ground-truth camera calibration used for simulation.
cam-calib="/home/mateo/Documents/apps/bsltdeps/basalt/data/tumvi_512_ds_calib.json"
# Path to config file.
config-path="/home/mateo/Documents/apps/bsltdeps/basalt/data/tumvi_512_config.json"
# Path to folder where marginalization data will be stored.
marg-data=""
# Poll and print for queue sizes.
print-queue=0
# Whether to use a double or single precision pipeline.
use-double=0

src/monado/.clang-format Normal file

@@ -0,0 +1,7 @@
---
Language: Cpp
BasedOnStyle: Google
IndentWidth: 2
IncludeBlocks: Preserve
ColumnLimit: 120
...

src/monado/.clang-tidy Normal file

@@ -0,0 +1,8 @@
Checks: 'readability-*, -readability-magic-numbers, -readability-function-cognitive-complexity, -readability-else-after-return, -readability-redundant-access-specifiers, performance-*, modernize-*, -modernize-use-trailing-return-type, -modernize-avoid-c-arrays, -modernize-use-nodiscard, -modernize-use-auto, -modernize-pass-by-value, misc-assert-side-effect, -clang-analyzer-osx.*, -clang-analyzer-cplusplus.Move, -clang-analyzer-core.uninitialized.UndefReturn, -clang-analyzer-optin.portability.UnixAPI, -clang-analyzer-unix.Malloc'
HeaderFilterRegex: '.*/thirdparty/basalt-headers/include/basalt/*'
CheckOptions:
- key: readability-identifier-naming
value: lower_case
- key: readability-braces-around-statements.ShortStatementLines
value: 2

src/monado/slam_tracker.cpp Normal file

@@ -0,0 +1,499 @@
// Copyright 2022, Collabora, Ltd.
#include "slam_tracker.hpp"
#include "slam_tracker_ui.hpp"
#include <pangolin/display/image_view.h>
#include <pangolin/pangolin.h>
#include <CLI/CLI.hpp>
#include <chrono>
#include <cstdio>
#include <memory>
#include <mutex>
#include <string>
#include <thread>
#include <unordered_set>
#include "sophus/se3.hpp"
#include <basalt/io/marg_data_io.h>
#include <basalt/serialization/headers_serialization.h>
#include <basalt/vi_estimator/vio_estimator.h>
#include "basalt/utils/vis_utils.h"
namespace xrt::auxiliary::tracking::slam {
const int IMPLEMENTATION_VERSION_MAJOR = HEADER_VERSION_MAJOR;
const int IMPLEMENTATION_VERSION_MINOR = HEADER_VERSION_MINOR;
const int IMPLEMENTATION_VERSION_PATCH = HEADER_VERSION_PATCH;
using std::cout;
using std::make_shared;
using std::make_unique;
using std::pair;
using std::shared_ptr;
using std::static_pointer_cast;
using std::string;
using std::thread;
using std::to_string;
using std::unordered_set;
using std::vector;
using namespace basalt;
string imu2str(const imu_sample &s) {
string str = "imu_sample ";
str += "t=" + to_string(s.timestamp) + " ";
str += "a=[" + to_string(s.ax) + ", " + to_string(s.ay) + ", " + to_string(s.az) + "] ";
str += "w=[" + to_string(s.wx) + ", " + to_string(s.wy) + ", " + to_string(s.wz) + "]";
return str;
}
string img2str(const img_sample &s) {
string str = "img_sample ";
str += s.is_left ? "left " : "right ";
str += "t=" + to_string(s.timestamp);
return str;
}
string pose2str(const pose &p) {
string str = "pose ";
str += "p=[" + to_string(p.px) + ", " + to_string(p.py) + ", " + to_string(p.pz) + "] ";
str += "r=[" + to_string(p.rx) + ", " + to_string(p.ry) + ", " + to_string(p.rz) + ", " + to_string(p.rw) + "]";
return str;
}
struct slam_tracker::implementation {
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
private:
// Options parsed from unified config file
bool show_gui = true;
string cam_calib_path;
string config_path;
string marg_data_path;
bool print_queue = false;
bool use_double = false;
// Basalt in its current state does not support monocular cameras, although it
// should be possible to adapt it to do so, see:
// https://gitlab.com/VladyslavUsenko/basalt/-/issues/2#note_201965760
// https://gitlab.com/VladyslavUsenko/basalt/-/issues/25#note_362741510
// https://github.com/DLR-RM/granite
static constexpr int NUM_CAMS = 2;
// VIO members
Calibration<double> calib;
VioConfig vio_config;
OpticalFlowBase::Ptr opt_flow_ptr;
VioEstimatorBase::Ptr vio;
bool expecting_left_frame = true;
// Queues
std::atomic<bool> running = false;
tbb::concurrent_bounded_queue<PoseVelBiasState<double>::Ptr> out_state_queue;
tbb::concurrent_bounded_queue<PoseVelBiasState<double>::Ptr> monado_out_state_queue;
tbb::concurrent_bounded_queue<OpticalFlowInput::Ptr> *image_data_queue = nullptr; // Invariant: not null after ctor
tbb::concurrent_bounded_queue<ImuData<double>::Ptr> *imu_data_queue = nullptr; // Invariant: not null after ctor
// Threads
thread state_consumer_thread;
thread queues_printer_thread;
// External Queues
slam_tracker_ui ui{};
MargDataSaver::Ptr marg_data_saver;
// slam_tracker features
unordered_set<int> supported_features{F_ADD_CAMERA_CALIBRATION, F_ADD_IMU_CALIBRATION, F_ENABLE_POSE_EXT_TIMING};
// Additional calibration data
vector<cam_calibration> added_cam_calibs{};
vector<imu_calibration> added_imu_calibs{};
// Pose timing
bool pose_timing_enabled = false;
public:
implementation(const string &unified_config) {
load_unified_config(unified_config);
vio_config.load(config_path);
load_calibration_data(cam_calib_path);
}
private:
void load_unified_config(const string &unified_config) {
CLI::App app{"Options for the Basalt SLAM Tracker"};
app.add_option("--show-gui", show_gui, "Show GUI");
app.add_option("--cam-calib", cam_calib_path, "Ground-truth camera calibration used for simulation.")->required();
app.add_option("--config-path", config_path, "Path to config file.")->required();
app.add_option("--marg-data", marg_data_path, "Path to folder where marginalization data will be stored.");
app.add_option("--print-queue", print_queue, "Poll and print for queue sizes.");
app.add_option("--use-double", use_double, "Whether to use a double or single precision pipeline.");
try {
// While --config-path sets the VIO configuration, --config sets the
// entire unified Basalt configuration, including --config-path
app.set_config("--config", unified_config, "Configuration file.", true);
app.allow_config_extras(false); // Makes parsing fail on unknown options
string unique_config = "--config=" + unified_config;
app.parse(unique_config);
} catch (const CLI::ParseError &e) {
app.exit(e);
ASSERT(false, "Config file error (%s)", unified_config.c_str());
}
cout << "Instantiating Basalt SLAM tracker\n";
cout << "Using config file: " << app["--config"]->as<string>() << "\n";
cout << app.config_to_str(true, true) << "\n";
}
void load_calibration_data(const string &calib_path) {
std::ifstream os(calib_path, std::ios::binary);
if (os.is_open()) {
cereal::JSONInputArchive archive(os);
archive(calib);
cout << "Loaded camera with " << calib.intrinsics.size() << " cameras\n";
} else {
std::cerr << "could not load camera calibration " << calib_path << "\n";
std::abort();
}
}
pose get_pose_from_state(const PoseVelBiasState<double>::Ptr &state) const {
pose p;
Sophus::SE3d T_w_i = state->T_w_i;
p.px = T_w_i.translation().x();
p.py = T_w_i.translation().y();
p.pz = T_w_i.translation().z();
p.rx = T_w_i.unit_quaternion().x();
p.ry = T_w_i.unit_quaternion().y();
p.rz = T_w_i.unit_quaternion().z();
p.rw = T_w_i.unit_quaternion().w();
p.timestamp = state->t_ns;
p.next = nullptr;
if (pose_timing_enabled) {
auto pose_timing = make_shared<pose_ext_timing>();
pose_timing->timestamps = state->input_images->tss;
p.next = pose_timing;
}
return p;
}
void state_consumer() {
PoseVelBiasState<double>::Ptr data;
while (true) {
out_state_queue.pop(data);
if (data.get() == nullptr) {
monado_out_state_queue.push(nullptr);
break;
}
data->input_images->addTime("tracker_consumer_received");
if (show_gui) ui.log_vio_data(data);
data->input_images->addTime("tracker_consumer_pushed");
monado_out_state_queue.push(data);
}
cout << "Finished state_consumer\n";
}
void queues_printer() {
while (running) {
cout << "[in] frames: " << image_data_queue->size() << "/" << image_data_queue->capacity() << " \t"
<< "[in] imu: " << imu_data_queue->size() << "/" << imu_data_queue->capacity() << " \t"
<< "[mid] keypoints: " << opt_flow_ptr->output_queue->size() << "/" << opt_flow_ptr->output_queue->capacity()
<< " \t"
<< "[out] pose: " << out_state_queue.size() << "/" << out_state_queue.capacity() << "\n";
std::this_thread::sleep_for(std::chrono::milliseconds(500));
}
cout << "Finished queues_printer\n";
}
public:
void initialize() {
// Overwrite camera calibration data
for (const auto &c : added_cam_calibs) {
apply_cam_calibration(c);
}
// Overwrite IMU calibration data
for (const auto &c : added_imu_calibs) {
apply_imu_calibration(c);
}
// NOTE: This factory also starts the optical flow
opt_flow_ptr = OpticalFlowFactory::getOpticalFlow(vio_config, calib);
image_data_queue = &opt_flow_ptr->input_queue;
ASSERT_(image_data_queue != nullptr);
vio = VioEstimatorFactory::getVioEstimator(vio_config, calib, constants::g, true, use_double);
imu_data_queue = &vio->imu_data_queue;
ASSERT_(imu_data_queue != nullptr);
opt_flow_ptr->output_queue = &vio->vision_data_queue;
if (show_gui) {
ui.initialize(NUM_CAMS);
vio->out_vis_queue = &ui.out_vis_queue;
}
vio->out_state_queue = &out_state_queue;
if (!marg_data_path.empty()) {
marg_data_saver.reset(new MargDataSaver(marg_data_path));
vio->out_marg_queue = &marg_data_saver->in_marg_queue;
}
}
void start() {
running = true;
vio->initialize(Eigen::Vector3d::Zero(), Eigen::Vector3d::Zero());
if (show_gui) ui.start(vio->getT_w_i_init(), calib);
state_consumer_thread = thread(&slam_tracker::implementation::state_consumer, this);
if (print_queue) queues_printer_thread = thread(&slam_tracker::implementation::queues_printer, this);
}
void stop() {
running = false;
image_data_queue->push(nullptr);
imu_data_queue->push(nullptr);
if (print_queue) queues_printer_thread.join();
state_consumer_thread.join();
if (show_gui) ui.stop();
// TODO: There is a segfault when closing Monado without starting the stream;
// it happens in a lambda from keypoint_vio.cpp and ends at calib_bias.hpp:112
}
void finalize() {
// Only the OpticalFlow gets started by initialize, finish it with this
image_data_queue->push(nullptr);
}
bool is_running() { return running; }
void push_imu_sample(const imu_sample &s) {
// concurrent_bounded_queue expects the Erasable and Allocator named
// requirements for the type; we use a pointer because it already satisfies
// them. The other examples do the same, but it is far from optimal.
ImuData<double>::Ptr data;
data.reset(new ImuData<double>);
data->t_ns = s.timestamp;
data->accel = {s.ax, s.ay, s.az};
data->gyro = {s.wx, s.wy, s.wz};
imu_data_queue->push(data);
}
private:
OpticalFlowInput::Ptr partial_frame;
public:
void push_frame(const img_sample &s) {
ASSERT(expecting_left_frame == s.is_left, "Unexpected %s frame", s.is_left ? "left" : "right");
expecting_left_frame = !expecting_left_frame;
int i = -1;
if (s.is_left) {
partial_frame = make_shared<OpticalFlowInput>();
partial_frame->timing_enabled = pose_timing_enabled;
partial_frame->addTime("frame_ts", s.timestamp);
partial_frame->addTime("tracker_received");
partial_frame->img_data.resize(NUM_CAMS);
partial_frame->t_ns = s.timestamp;
i = 0;
} else {
ASSERT(partial_frame->t_ns == s.timestamp, "Left and right frame timestamps differ: %ld != %ld",
partial_frame->t_ns, s.timestamp);
i = 1;
}
int width = s.img.cols;
int height = s.img.rows;
// Forced to use uint16_t here, in part because of cameras with 12-bit grayscale support
auto &mimg = partial_frame->img_data[i].img;
mimg.reset(new ManagedImage<uint16_t>(width, height));
// TODO: We could avoid this copy. Maybe by writing a custom
// allocator for ManagedImage that ties into the OpenCV allocator
size_t full_size = width * height;
for (size_t j = 0; j < full_size; j++) {
mimg->ptr[j] = s.img.at<uchar>(j) << 8;
}
if (!s.is_left) {
partial_frame->addTime("tracker_pushed");
image_data_queue->push(partial_frame);
if (show_gui) ui.update_last_image(partial_frame);
}
}
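// A hedged sketch (not part of the original commit) of a vectorized
// alternative to the per-pixel loop in push_frame() above: wrap the
// ManagedImage buffer in a cv::Mat header so OpenCV performs the widening
// shift in a single call (still a copy, but no manual loop).
static void copy_frame_widened(const cv::Mat &src, ManagedImage<uint16_t> &dst) {
  cv::Mat view(src.rows, src.cols, CV_16UC1, dst.ptr, dst.pitch);
  src.convertTo(view, CV_16UC1, 256);  // x * 256 == x << 8
}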
bool try_dequeue_pose(pose &p) {
PoseVelBiasState<double>::Ptr state;
bool dequeued = monado_out_state_queue.try_pop(state);
if (dequeued) {
state->input_images->addTime("monado_dequeued");
p = get_pose_from_state(state);
}
return dequeued;
}
bool supports_feature(int feature_id) { return supported_features.count(feature_id) == 1; }
bool use_feature(int feature_id, const shared_ptr<void> &params, shared_ptr<void> &result) {
result = nullptr;
if (feature_id == FID_ACC) {
shared_ptr<FPARAMS_ACC> casted_params = static_pointer_cast<FPARAMS_ACC>(params);
add_cam_calibration(*casted_params);
} else if (feature_id == FID_AIC) {
shared_ptr<FPARAMS_AIC> casted_params = static_pointer_cast<FPARAMS_AIC>(params);
add_imu_calibration(*casted_params);
} else if (feature_id == FID_EPET) {
result = enable_pose_ext_timing();
} else {
return false;
}
return true;
}
void add_cam_calibration(const cam_calibration &cam_calib) { added_cam_calibs.push_back(cam_calib); }
void apply_cam_calibration(const cam_calibration &cam_calib) {
using Scalar = double;
int i = cam_calib.cam_index;
const auto &tci = cam_calib.T_cam_imu;
Eigen::Matrix3d rci;
rci << tci(0, 0), tci(0, 1), tci(0, 2), tci(1, 0), tci(1, 1), tci(1, 2), tci(2, 0), tci(2, 1), tci(2, 2);
Eigen::Quaterniond q(rci);
Eigen::Vector3d p{tci(0, 3), tci(1, 3), tci(2, 3)};
calib.T_i_c[i] = Calibration<Scalar>::SE3(q, p);
GenericCamera<double> model;
const vector<Scalar> &cmp = cam_calib.model_params;
if (cam_calib.model == cam_calibration::cam_model::pinhole) {
PinholeCamera<Scalar>::VecN mp;
mp << cam_calib.fx, cam_calib.fy, cam_calib.cx, cam_calib.cy;
PinholeCamera pinhole(mp);
model.variant = pinhole;
} else if (cam_calib.model == cam_calibration::cam_model::fisheye) {
KannalaBrandtCamera4<Scalar>::VecN mp;
mp << cam_calib.fx, cam_calib.fy, cam_calib.cx, cam_calib.cy, cmp[0], cmp[1], cmp[2], cmp[3];
KannalaBrandtCamera4 kannala_brandt(mp);
model.variant = kannala_brandt;
} else {
ASSERT(false, "Unsupported camera model (%d)", static_cast<int>(cam_calib.model));
}
calib.intrinsics[i] = model;
calib.resolution[i] = {cam_calib.width, cam_calib.height};
// NOTE: ignoring cam_calib.distortion_model and distortion_params as basalt can't use them
}
void add_imu_calibration(const imu_calibration &imu_calib) { added_imu_calibs.push_back(imu_calib); }
void apply_imu_calibration(const imu_calibration &imu_calib) {
using Scalar = double;
int i = imu_calib.imu_index;
ASSERT(i == 0, "More than one IMU unsupported (%d)", i);
static double frequency = -1;
if (frequency == -1) {
frequency = imu_calib.frequency;
calib.imu_update_rate = frequency;
} else {
ASSERT(frequency == calib.imu_update_rate, "Unsupported mix of IMU frequencies %lf != %lf", frequency,
calib.imu_update_rate);
}
// Accelerometer calibration
inertial_calibration accel = imu_calib.accel;
Eigen::Matrix<Scalar, 9, 1> accel_bias_full;
const auto &abias = accel.offset;
const auto &atran = accel.transform;
// TODO: This does the same as rs_t265.cpp, but that's incorrect. We should do an LQ decomposition of atran and
// use L. See https://gitlab.com/VladyslavUsenko/basalt-headers/-/issues/8
accel_bias_full << abias(0), abias(1), abias(2), atran(0, 0) - 1, atran(1, 0), atran(2, 0), atran(1, 1) - 1,
atran(2, 1), atran(2, 2) - 1;
CalibAccelBias<Scalar> accel_bias;
accel_bias.getParam() = accel_bias_full;
calib.calib_accel_bias = accel_bias;
calib.accel_noise_std = {accel.noise_std(0), accel.noise_std(1), accel.noise_std(2)};
calib.accel_bias_std = {accel.bias_std(0), accel.bias_std(1), accel.bias_std(2)};
// Gyroscope calibration
inertial_calibration gyro = imu_calib.gyro;
Eigen::Matrix<Scalar, 12, 1> gyro_bias_full;
const auto &gbias = gyro.offset;
const auto &gtran = gyro.transform;
gyro_bias_full << gbias(0), gbias(1), gbias(2), gtran(0, 0) - 1, gtran(1, 0), gtran(2, 0), gtran(0, 1),
gtran(1, 1) - 1, gtran(2, 1), gtran(0, 2), gtran(1, 2), gtran(2, 2) - 1;
CalibGyroBias<Scalar> gyro_bias;
gyro_bias.getParam() = gyro_bias_full;
calib.calib_gyro_bias = gyro_bias;
calib.gyro_noise_std = {gyro.noise_std(0), gyro.noise_std(1), gyro.noise_std(2)};
calib.gyro_bias_std = {gyro.bias_std(0), gyro.bias_std(1), gyro.bias_std(2)};
}
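// A hedged sketch (not part of the original commit) of the LQ-based fix the
// TODO above mentions: A = LQ can be read off the QR factorization of the
// transpose (if A^T = QR then A = R^T Q^T, with R^T lower triangular).
// Assumes Eigen's QR module is reachable through the existing includes.
static Eigen::Matrix3d lower_triangular_factor(const Eigen::Matrix3d &A) {
  Eigen::HouseholderQR<Eigen::Matrix3d> qr(A.transpose());
  Eigen::Matrix3d R = qr.matrixQR().triangularView<Eigen::Upper>();
  return R.transpose();  // the L factor; diagonal signs may need normalizing
}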
shared_ptr<vector<string>> enable_pose_ext_timing() {
pose_timing_enabled = true;
vector<string> titles{"frame_ts",
"tracker_received",
"tracker_pushed",
"frames_received", // < Basalt computation starts
"opticalflow_produced",
"vio_start",
"imu_preintegrated",
"landmarks_updated",
"optimized",
"marginalized",
"pose_produced", // Basalt computation ends
"tracker_consumer_received",
"tracker_consumer_pushed",
"monado_dequeued"};
return make_shared<vector<string>>(titles);
}
};
slam_tracker::slam_tracker(const string &config_file) { impl = make_unique<slam_tracker::implementation>(config_file); }
slam_tracker::~slam_tracker() = default;
void slam_tracker::initialize() { impl->initialize(); }
void slam_tracker::start() { impl->start(); }
void slam_tracker::stop() { impl->stop(); }
void slam_tracker::finalize() { impl->finalize(); }
bool slam_tracker::is_running() { return impl->is_running(); }
void slam_tracker::push_imu_sample(const imu_sample &s) { impl->push_imu_sample(s); }
void slam_tracker::push_frame(const img_sample &sample) { impl->push_frame(sample); }
bool slam_tracker::try_dequeue_pose(pose &pose) { return impl->try_dequeue_pose(pose); }
bool slam_tracker::supports_feature(int feature_id) { return impl->supports_feature(feature_id); }
bool slam_tracker::use_feature(int feature_id, const shared_ptr<void> &params, shared_ptr<void> &result) {
return impl->use_feature(feature_id, params, result);
}
} // namespace xrt::auxiliary::tracking::slam

src/monado/slam_tracker_ui.hpp Normal file

@@ -0,0 +1,334 @@
#pragma once
#include <pangolin/display/image_view.h>
#include <pangolin/pangolin.h>
#include <CLI/CLI.hpp>
#include <cstdio>
#include <memory>
#include <string>
#include <thread>
#include "sophus/se3.hpp"
#include <basalt/io/marg_data_io.h>
#include <basalt/serialization/headers_serialization.h>
#include <basalt/vi_estimator/vio_estimator.h>
#include "basalt/utils/vis_utils.h"
#define ASSERT(cond, ...) \
do { \
if (!(cond)) { \
printf("Assertion failed @%s:%d\n", __func__, __LINE__); \
printf(__VA_ARGS__); \
printf("\n"); \
exit(EXIT_FAILURE); \
} \
} while (false);
#define ASSERT_(cond) ASSERT(cond, "%s", #cond);
namespace xrt::auxiliary::tracking::slam {
using std::cout;
using std::make_shared;
using std::shared_ptr;
using std::string;
using std::thread;
using std::to_string;
using std::vector;
using namespace basalt;
class slam_tracker_ui {
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
private:
VioVisualizationData::Ptr curr_vis_data;
pangolin::DataLog vio_data_log;
thread vis_thread;
thread ui_runner_thread;
size_t num_cams = 0;
std::atomic<bool> running = false;
public:
tbb::concurrent_bounded_queue<VioVisualizationData::Ptr> out_vis_queue{};
void initialize(int ncams) {
vio_data_log.Clear();
ASSERT_(ncams > 0);
num_cams = ncams;
}
void start(const Sophus::SE3d &T_w_i_init, const basalt::Calibration<double> &calib) {
running = true;
start_visualization_thread();
start_ui(T_w_i_init, calib);
}
void stop() {
running = false;
vis_thread.join();
ui_runner_thread.join();
}
void start_visualization_thread() {
vis_thread = thread([&]() {
while (true) {
out_vis_queue.pop(curr_vis_data);
if (curr_vis_data.get() == nullptr) break;
}
cout << "Finished vis_thread\n";
});
}
int64_t start_t_ns = -1;
std::vector<int64_t> vio_t_ns;
Eigen::aligned_vector<Eigen::Vector3d> vio_t_w_i;
void log_vio_data(const PoseVelBiasState<double>::Ptr &data) {
int64_t t_ns = data->t_ns;
if (start_t_ns < 0) start_t_ns = t_ns;
float since_start_ns = t_ns - start_t_ns;
vio_t_ns.emplace_back(t_ns);
vio_t_w_i.emplace_back(data->T_w_i.translation());
Sophus::SE3d T_w_i = data->T_w_i;
Eigen::Vector3d vel_w_i = data->vel_w_i;
Eigen::Vector3d bg = data->bias_gyro;
Eigen::Vector3d ba = data->bias_accel;
vector<float> vals;
vals.push_back(since_start_ns * 1e-9);
for (int i = 0; i < 3; i++) vals.push_back(vel_w_i[i]);
for (int i = 0; i < 3; i++) vals.push_back(T_w_i.translation()[i]);
for (int i = 0; i < 3; i++) vals.push_back(bg[i]);
for (int i = 0; i < 3; i++) vals.push_back(ba[i]);
vio_data_log.Log(vals);
}
pangolin::Plotter *plotter;
pangolin::DataLog imu_data_log{};
basalt::Calibration<double> calib;
void start_ui(const Sophus::SE3d &T_w_i_init, const basalt::Calibration<double> &c) {
ui_runner_thread = thread(&slam_tracker_ui::ui_runner, this, T_w_i_init, c);
}
pangolin::Var<bool> follow{"ui.follow", true, false, true};
pangolin::Var<bool> show_est_pos{"ui.show_est_pos", true, false, true};
pangolin::Var<bool> show_est_vel{"ui.show_est_vel", false, false, true};
pangolin::Var<bool> show_est_bg{"ui.show_est_bg", false, false, true};
pangolin::Var<bool> show_est_ba{"ui.show_est_ba", false, false, true};
void ui_runner(const Sophus::SE3d &T_w_i_init, const basalt::Calibration<double> &c) {
constexpr int UI_WIDTH = 200;
calib = c;
string window_name = "Basalt SLAM Tracker for Monado";
pangolin::CreateWindowAndBind(window_name, 1800, 1000);
glEnable(GL_DEPTH_TEST);
pangolin::View &img_view_display = pangolin::CreateDisplay()
.SetBounds(0.4, 1.0, pangolin::Attach::Pix(UI_WIDTH), 0.4)
.SetLayout(pangolin::LayoutEqual);
pangolin::View &plot_display = pangolin::CreateDisplay().SetBounds(0.0, 0.4, pangolin::Attach::Pix(UI_WIDTH), 1.0);
plotter = new pangolin::Plotter(&imu_data_log, 0.0, 100, -3.0, 3.0, 0.01, 0.01);
plot_display.AddDisplay(*plotter);
pangolin::CreatePanel("ui").SetBounds(0.0, 1.0, 0.0, pangolin::Attach::Pix(UI_WIDTH));
std::vector<std::shared_ptr<pangolin::ImageView>> img_view;
while (img_view.size() < calib.intrinsics.size()) {
std::shared_ptr<pangolin::ImageView> iv(new pangolin::ImageView);
size_t idx = img_view.size();
img_view.push_back(iv);
img_view_display.AddDisplay(*iv);
iv->extern_draw_function = [idx, this](auto &v) { this->draw_image_overlay(v, idx); };
}
Eigen::Vector3d cam_p(0.5, -2, -2);
cam_p = T_w_i_init.so3() * calib.T_i_c[0].so3() * cam_p;
cam_p[2] = 1;
pangolin::OpenGlRenderState camera(
pangolin::ProjectionMatrix(640, 480, 400, 400, 320, 240, 0.001, 10000),
pangolin::ModelViewLookAt(cam_p[0], cam_p[1], cam_p[2], 0, 0, 0, pangolin::AxisZ));
pangolin::View &display3D = pangolin::CreateDisplay()
.SetAspect(-640 / 480.0)
.SetBounds(0.4, 1.0, 0.4, 1.0)
.SetHandler(new pangolin::Handler3D(camera));
while (running && !pangolin::ShouldQuit()) {
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
if (follow) {
// TODO: There is a small race condition here over
// curr_vis_data that is also present in the original basalt examples
if (curr_vis_data) {
auto T_w_i = curr_vis_data->states.back();
T_w_i.so3() = Sophus::SO3d();
camera.Follow(T_w_i.matrix());
}
}
display3D.Activate(camera);
glClearColor(1.0, 1.0, 1.0, 1.0);
draw_scene();
img_view_display.Activate();
{
pangolin::GlPixFormat fmt;
fmt.glformat = GL_LUMINANCE;
fmt.gltype = GL_UNSIGNED_SHORT;
fmt.scalable_internal_format = GL_LUMINANCE16;
if (curr_vis_data && curr_vis_data->opt_flow_res && curr_vis_data->opt_flow_res->input_images) {
auto &img_data = curr_vis_data->opt_flow_res->input_images->img_data;
for (size_t cam_id = 0; cam_id < num_cams; cam_id++) {
if (img_data[cam_id].img) {
img_view[cam_id]->SetImage(img_data[cam_id].img->ptr, img_data[cam_id].img->w, img_data[cam_id].img->h,
img_data[cam_id].img->pitch, fmt);
}
}
}
draw_plots();
}
if (show_est_vel.GuiChanged() || show_est_pos.GuiChanged() || show_est_ba.GuiChanged() ||
show_est_bg.GuiChanged()) {
draw_plots();
}
pangolin::FinishFrame();
}
pangolin::DestroyWindow(window_name);
cout << "Finished ui_runner\n";
}
pangolin::Var<bool> show_obs{"ui.show_obs", true, false, true};
pangolin::Var<bool> show_ids{"ui.show_ids", false, false, true};
void draw_image_overlay(pangolin::View &v, size_t cam_id) {
UNUSED(v);
if (show_obs) {
glLineWidth(1.0);
glColor3f(1.0, 0.0, 0.0);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
if (curr_vis_data && cam_id < curr_vis_data->projections.size()) {
const auto &points = curr_vis_data->projections[cam_id];
if (!points.empty()) {
double min_id = points[0][2];
double max_id = points[0][2];
for (const auto &points2 : curr_vis_data->projections) {
for (const auto &p : points2) {
min_id = std::min(min_id, p[2]);
max_id = std::max(max_id, p[2]);
}
}
for (const auto &c : points) {
const float radius = 6.5;
float r;
float g;
float b;
getcolor(c[2] - min_id, max_id - min_id, b, g, r);
glColor3f(r, g, b);
pangolin::glDrawCirclePerimeter(c[0], c[1], radius);
if (show_ids) pangolin::GlFont::I().Text("%d", int(c[3])).Draw(c[0], c[1]);
}
}
glColor3f(1.0, 0.0, 0.0);
pangolin::GlFont::I().Text("Tracked %d points", points.size()).Draw(5, 20);
}
}
}
void draw_scene() {
glPointSize(3);
glColor3f(1.0, 0.0, 0.0);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glColor3ubv(cam_color);
Eigen::aligned_vector<Eigen::Vector3d> sub_gt(vio_t_w_i.begin(), vio_t_w_i.end());
pangolin::glDrawLineStrip(sub_gt);
if (curr_vis_data) {
for (const auto &p : curr_vis_data->states)
for (const auto &t_i_c : calib.T_i_c) render_camera((p * t_i_c).matrix(), 2.0, state_color, 0.1);
for (const auto &p : curr_vis_data->frames)
for (const auto &t_i_c : calib.T_i_c) render_camera((p * t_i_c).matrix(), 2.0, pose_color, 0.1);
for (const auto &t_i_c : calib.T_i_c)
render_camera((curr_vis_data->states.back() * t_i_c).matrix(), 2.0, cam_color, 0.1);
glColor3ubv(pose_color);
pangolin::glDrawPoints(curr_vis_data->points);
}
pangolin::glDrawAxis(Sophus::SE3d().matrix(), 1.0);
}
OpticalFlowInput::Ptr last_img_data{};
void update_last_image(OpticalFlowInput::Ptr &data) { last_img_data = data; }
void draw_plots() {
plotter->ClearSeries();
plotter->ClearMarkers();
if (show_est_pos) {
plotter->AddSeries("$0", "$4", pangolin::DrawingModeLine, pangolin::Colour::Red(), "position x", &vio_data_log);
plotter->AddSeries("$0", "$5", pangolin::DrawingModeLine, pangolin::Colour::Green(), "position y", &vio_data_log);
plotter->AddSeries("$0", "$6", pangolin::DrawingModeLine, pangolin::Colour::Blue(), "position z", &vio_data_log);
}
if (show_est_vel) {
plotter->AddSeries("$0", "$1", pangolin::DrawingModeLine, pangolin::Colour::Red(), "velocity x", &vio_data_log);
plotter->AddSeries("$0", "$2", pangolin::DrawingModeLine, pangolin::Colour::Green(), "velocity y", &vio_data_log);
plotter->AddSeries("$0", "$3", pangolin::DrawingModeLine, pangolin::Colour::Blue(), "velocity z", &vio_data_log);
}
if (show_est_bg) {
plotter->AddSeries("$0", "$7", pangolin::DrawingModeLine, pangolin::Colour::Red(), "gyro bias x", &vio_data_log);
plotter->AddSeries("$0", "$8", pangolin::DrawingModeLine, pangolin::Colour::Green(), "gyro bias y",
&vio_data_log);
plotter->AddSeries("$0", "$9", pangolin::DrawingModeLine, pangolin::Colour::Blue(), "gyro bias z", &vio_data_log);
}
if (show_est_ba) {
plotter->AddSeries("$0", "$10", pangolin::DrawingModeLine, pangolin::Colour::Red(), "accel bias x",
&vio_data_log);
plotter->AddSeries("$0", "$11", pangolin::DrawingModeLine, pangolin::Colour::Green(), "accel bias y",
&vio_data_log);
plotter->AddSeries("$0", "$12", pangolin::DrawingModeLine, pangolin::Colour::Blue(), "accel bias z",
&vio_data_log);
}
if (last_img_data) {
double t = last_img_data->t_ns * 1e-9;
plotter->AddMarker(pangolin::Marker::Vertical, t, pangolin::Marker::Equal, pangolin::Colour::White());
}
}
};
} // namespace xrt::auxiliary::tracking::slam

thirdparty/monado/slam_tracker.hpp vendored Normal file

@@ -0,0 +1,296 @@
// Copyright 2021, Collabora, Ltd.
// SPDX-License-Identifier: BSL-1.0
/*!
* @file
* @brief SLAM tracker class header for usage in Monado.
* @author Mateo de Mayo <mateo.demayo@collabora.com>
* @ingroup aux_tracking
*
* This file contains the declaration of the @ref slam_tracker class. This
* header is intended to appear in both Monado and an external SLAM system. The
* implementation of `slam_tracker` is provided by the external system.
* Additional data types are declared for the communication between Monado and
* the system.
*
*/
#pragma once
#include <opencv2/core/mat.hpp>
#include <cstdint>
#include <memory>
#include <string>
#include <vector>
namespace xrt::auxiliary::tracking::slam {
// For the implementation: same as IMPLEMENTATION_VERSION_*
// For the user: the expected IMPLEMENTATION_VERSION_*. Should be checked at runtime.
constexpr int HEADER_VERSION_MAJOR = 2; //!< API Breakages
constexpr int HEADER_VERSION_MINOR = 0; //!< Backwards compatible API changes
constexpr int HEADER_VERSION_PATCH = 0; //!< Backw. comp. .h-implemented changes
// Which header version the external system is implementing.
extern const int IMPLEMENTATION_VERSION_MAJOR;
extern const int IMPLEMENTATION_VERSION_MINOR;
extern const int IMPLEMENTATION_VERSION_PATCH;
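// A sketch (not part of the original header) of the runtime check the
// comment above asks for, as the consumer side might write it:
//
//   if (IMPLEMENTATION_VERSION_MAJOR != HEADER_VERSION_MAJOR) {
//     // Major versions differ: the API broke; refuse to load this tracker.
//   }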
enum class pose_ext_type : int;
/*!
* @brief Standard pose type for communication between Monado and the external SLAM system
*/
struct pose {
std::int64_t timestamp; //!< In same clock as input samples
float px, py, pz; //!< Position vector
float rx, ry, rz, rw = 1; //!< Orientation quaternion
std::shared_ptr<struct pose_extension> next = nullptr;
pose() = default;
pose(std::int64_t timestamp, //
float px, float py, float pz, //
float rx, float ry, float rz, float rw)
: timestamp(timestamp), //
px(px), py(py), pz(pz), //
rx(rx), ry(ry), rz(rz), rw(rw) {}
std::shared_ptr<pose_extension>
find_pose_extension(pose_ext_type required_type) const;
};
/*!
* @brief IMU Sample type to pass around between programs
*/
struct imu_sample {
std::int64_t timestamp; //!< In nanoseconds
double ax, ay, az; //!< Accel in meters per second squared (m / s^2)
double wx, wy, wz; //!< Gyro in radians per second (rad / s)
imu_sample() = default;
imu_sample(std::int64_t timestamp, double ax, double ay, double az, double wx,
double wy, double wz)
: timestamp(timestamp), ax(ax), ay(ay), az(az), wx(wx), wy(wy), wz(wz) {}
};
/*!
* @brief Image sample type to pass around between programs. It is expected that
* any SLAM system takes OpenCV matrices as input.
*/
struct img_sample {
std::int64_t timestamp;
cv::Mat img;
bool is_left;
img_sample() = default;
img_sample(std::int64_t timestamp, const cv::Mat &img, bool is_left)
: timestamp(timestamp), img(img), is_left(is_left) {}
};
/*!
* @brief slam_tracker serves as an interface between Monado and external SLAM
* systems.
*
* This class uses the pointer-to-implementation pattern, and its implementation
* should be provided by an external SLAM system.
*/
struct slam_tracker {
/*!
* @brief Construct a new slam tracker object
*
* @param config_file SLAM system parameters tend to be numerous and very
* specific, so a configuration file is usually the main way to set them up.
* Therefore, this constructor receives a path to an
* implementation-specific configuration file.
*/
slam_tracker(const std::string &config_file);
~slam_tracker();
slam_tracker(const slam_tracker &) = delete;
slam_tracker &operator=(const slam_tracker &) = delete;
void initialize();
void start();
bool is_running();
void stop();
void finalize();
/*!
* @brief Push an IMU sample into the tracker.
*
* There must be a single producer thread pushing samples.
* Samples must have monotonically increasing timestamps.
* The implementation must be non-blocking.
* Thus, a separate consumer thread should process the samples.
*/
void push_imu_sample(const imu_sample &sample);
/*!
* @brief Push an image sample into the tracker.
*
* Same conditions as @ref push_imu_sample apply.
* When using stereo frames, they must be pushed in a left-right order.
* The consecutive left-right pair must have the same timestamps.
*/
void push_frame(const img_sample &sample);
/*!
* @brief Get the latest tracked pose from the SLAM system.
*
* There must be a single thread consuming this method.
*
* @param[out] out_pose Dequeued pose.
* @return true If a new pose was dequeued into @p out_pose.
* @return false If there was no pose to dequeue.
*/
bool try_dequeue_pose(pose &out_pose);
//! Asks the SLAM system whether it supports a specific feature.
bool supports_feature(int feature_id);
/*!
* @brief Use a special feature of the SLAM tracker.
*
* This method uses heap-allocated objects for passing parameters and
* obtaining the results. Use `std::static_pointer_cast` to cast the shared
* pointers to the expected types.
*
* @param feature_id Id of the special feature.
* @param params Pointer to the parameter object for this feature.
* @param result Pointer to the result produced by the feature call.
* @return false if the feature was not supported, true otherwise.
*/
bool use_feature(int feature_id, const std::shared_ptr<void> &params,
std::shared_ptr<void> &result);
private:
struct implementation;
std::unique_ptr<implementation> impl;
};
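// A minimal usage sketch (not part of this header) of the contract above:
// one producer thread pushes samples with monotonic timestamps, one consumer
// thread polls poses. Variable names are illustrative.
//
//   // producer thread:
//   tracker.push_imu_sample({t_ns, ax, ay, az, wx, wy, wz});
//   tracker.push_frame({t_ns, left_img, true});   // left first...
//   tracker.push_frame({t_ns, right_img, false}); // ...then right, same t_ns
//   // consumer thread:
//   pose p;
//   while (tracker.is_running())
//     if (tracker.try_dequeue_pose(p)) { /* forward p to Monado */ }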
/*
* Special features
*
* A special feature consists of an ID, a PARAMS type, and a RESULT type. It
* can be defined using DEFINE_FEATURE. Once defined, the definition should
* not change in the future.
*
* One of the main concerns of the features interface is the ability to add
* new features without requiring updates to SLAM systems that are not
* interested in implementing them.
*
*/
#define DEFINE_FEATURE(NAME, SHORT_NAME, ID, PARAMS_TYPE, RESULT_TYPE) \
using FPARAMS_##SHORT_NAME = PARAMS_TYPE; \
using FRESULT_##SHORT_NAME = RESULT_TYPE; \
constexpr int FID_##SHORT_NAME = ID; \
constexpr int F_##NAME = ID;
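// For example, DEFINE_FEATURE(ADD_IMU_CALIBRATION, AIC, 2, imu_calibration, void)
// expands to:
//   using FPARAMS_AIC = imu_calibration;
//   using FRESULT_AIC = void;
//   constexpr int FID_AIC = 2;
//   constexpr int F_ADD_IMU_CALIBRATION = 2;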
struct cam_calibration {
enum class cam_model { pinhole, fisheye };
int cam_index; //!< For multi-camera setups. For stereo 0 ~ left, 1 ~ right.
int width, height; //!< Resolution
double frequency; //!< Frames per second
double fx, fy; //!< Focal lengths
double cx, cy; //!< Principal point
cam_model model;
std::vector<double> model_params;
cv::Matx<double, 4, 4> T_cam_imu; //!< Transformation from camera to imu space
};
struct inertial_calibration {
// Calibration intrinsics to apply to each raw measurement.
//! This transform will be applied to raw measurements.
cv::Matx<double, 3, 3> transform;
//! Offset to apply to raw measurements; called bias in other contexts.
cv::Matx<double, 3, 1> offset;
// Parameters for the random processes that model this IMU. See section "2.1
// Gyro Noise Model" of N. Trawny and S. I. Roumeliotis, "Indirect Kalman
// Filter for 3D Attitude Estimation". Analogous for accelerometers.
// http://mars.cs.umn.edu/tr/reports/Trawny05b.pdf#page=15
//! IMU internal bias ~ wiener process with steps N(0, σ²); this field is σ;
//! [σ] = U / sqrt(sec³) with U = rad if gyroscope, U = m/s if accelerometer.
cv::Matx<double, 3, 1> bias_std;
//! IMU measurement noise ~ N(0, σ²); this field is σ.
//! [σ] = U / sqrt(sec) with U = rad if gyroscope, U = m/s if accelerometer.
cv::Matx<double, 3, 1> noise_std;
inertial_calibration() : transform(cv::Matx<double, 3, 3>::eye()) {}
};
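// To make the units above concrete, under the continuous-time model of the
// cited report (a sketch, not part of the original header): for a sampling
// period dt, the discrete-time quantities would be
//   measurement noise: n_k ~ N(0, noise_std^2 / dt)
//   bias random walk:  b_{k+1} = b_k + w_k,  w_k ~ N(0, bias_std^2 * dt)
// i.e., both fields are continuous-time densities, which is why their units
// carry the 1/sqrt(sec) and 1/sqrt(sec^3) factors.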
struct imu_calibration {
int imu_index; //!< For multi-imu setups. Usually just 0.
double frequency; //!< Samples per second
inertial_calibration accel;
inertial_calibration gyro;
};
/*!
* Feature ADD_CAMERA_CALIBRATION
*
* Use it after the constructor but before `start()` to write or overwrite
* camera calibration data that might come from the system-specific config file.
*/
DEFINE_FEATURE(ADD_CAMERA_CALIBRATION, ACC, 1, cam_calibration, void)
/*!
* Feature ADD_IMU_CALIBRATION
*
* Use it after the constructor but before `start()` to write or overwrite
* IMU calibration data that might come from the system-specific config file.
*/
DEFINE_FEATURE(ADD_IMU_CALIBRATION, AIC, 2, imu_calibration, void)
/*!
* Feature ENABLE_POSE_EXT_TIMING
*
* Use it after the constructor but before `start()`.
* Returns a vector with names for the timestamps in `pose_ext_timing`.
*/
DEFINE_FEATURE(ENABLE_POSE_EXT_TIMING, EPET, 3, void, std::vector<std::string>)
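// A sketch (not part of the original header) of how a caller would exercise
// ADD_CAMERA_CALIBRATION, matching what use_feature() in slam_tracker.cpp
// expects; field values are illustrative:
//
//   auto cc = std::make_shared<FPARAMS_ACC>();  // i.e., cam_calibration
//   cc->cam_index = 0;
//   cc->model = cam_calibration::cam_model::pinhole;
//   // ... fill width/height, fx/fy, cx/cy, T_cam_imu ...
//   std::shared_ptr<void> result;  // stays null: FRESULT_ACC is void
//   if (tracker.supports_feature(FID_ACC))
//     tracker.use_feature(FID_ACC, cc, result);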
/*
* Pose extensions
*
* A pose extension is a struct that gets linked into the `pose.next` field.
* First ask whether the implementation supports enabling such an extension
* with a `supports_feature()` call using the appropriate `ENABLE_POSE_EXT_*`
* feature; it can then be enabled with the corresponding `use_feature()` call.
*
*/
enum class pose_ext_type : int {
UNDEFINED = 0,
TIMING = 1,
};
struct pose_extension {
pose_ext_type type = pose_ext_type::UNDEFINED;
std::shared_ptr<pose_extension> next = nullptr;
pose_extension(pose_ext_type type) : type(type) {}
};
inline std::shared_ptr<pose_extension>
pose::find_pose_extension(pose_ext_type required_type) const {
std::shared_ptr<pose_extension> pe = next;
while (pe != nullptr && pe->type != required_type) {
pe = pe->next;
}
return pe;
}
struct pose_ext_timing : pose_extension {
//! Internal pipeline timestamps of interest when generating the pose, in
//! steady-clock ns. Must have the same number of elements throughout a run.
std::vector<std::int64_t> timestamps{};
pose_ext_timing() : pose_extension{pose_ext_type::TIMING} {}
};
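// A sketch (not part of the original header) of the full pose-extension
// round trip with the types above:
//
//   std::shared_ptr<void> result;
//   tracker.use_feature(FID_EPET, nullptr, result);  // enable timing
//   auto titles = std::static_pointer_cast<FRESULT_EPET>(result);
//   pose p;
//   if (tracker.try_dequeue_pose(p)) {
//     auto ext = p.find_pose_extension(pose_ext_type::TIMING);
//     auto timing = std::static_pointer_cast<pose_ext_timing>(ext);
//     // timing->timestamps[i] is the stage named by (*titles)[i]
//   }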
} // namespace xrt::auxiliary::tracking::slam