Update slam_tracker.hpp to 4.0.0 (feature info pose extension)
parent e36bead967
commit 41b04f6292
@@ -383,8 +383,8 @@ target_sources(basalt
 )

 target_link_libraries(basalt
-  PUBLIC ${STD_CXX_FS} basalt::opencv basalt::basalt-headers TBB::tbb pangolin basalt::cli11
-  PRIVATE basalt::magic_enum rosbag apriltag opengv nlohmann::json fmt::fmt basalt::monado)
+  PUBLIC ${STD_CXX_FS} basalt::opencv basalt::basalt-headers TBB::tbb pangolin basalt::cli11 basalt::monado
+  PRIVATE basalt::magic_enum rosbag apriltag opengv nlohmann::json fmt::fmt)

 target_include_directories(basalt PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include)
 target_compile_definitions(basalt PUBLIC ${BASALT_COMPILE_DEFINITIONS})
 #target_compile_definitions(basalt PUBLIC BASALT_DISABLE_ASSERTS)
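(Presumably `basalt::monado` moves from PRIVATE to PUBLIC because the public optical flow header below now includes `slam_tracker.hpp` and exposes the `timestats` type, so targets linking against `basalt` need the Monado interface headers transitively.)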
@@ -119,7 +119,9 @@ Run an OpenXR app like `hello_xr` with the following environment variables set
export EUROC_PATH=/path/to/euroc/V1_01_easy/ # Set euroc dataset path. You can get a dataset from http://robotics.ethz.ch/~asl-datasets/ijrr_euroc_mav_dataset/vicon_room1/V1_01_easy/V1_01_easy.zip
export EUROC_LOG=debug
export EUROC_HMD=false # if false, a fake controller will be tracked, else a fake HMD
export EUROC_PLAY_FROM_START=true
export SLAM_LOG=debug
export SLAM_SUBMIT_FROM_START=true
export SLAM_CONFIG=$bsltdeps/basalt/data/monado/euroc.toml # Point to Basalt config file for Euroc
export OXR_DEBUG_GUI=1 # We will need the debug ui to start streaming the dataset
```
@@ -45,12 +45,14 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #include <basalt/calibration/calibration.hpp>
 #include <basalt/camera/stereographic_param.hpp>
 #include <basalt/utils/sophus_utils.hpp>
+#include <slam_tracker.hpp>

 #include <tbb/concurrent_queue.h>

 namespace basalt {

 using KeypointId = size_t;
+using xrt::auxiliary::tracking::slam::timestats;

 struct OpticalFlowInput {
   using Ptr = std::shared_ptr<OpticalFlowInput>;
@@ -58,20 +60,9 @@ struct OpticalFlowInput {
   int64_t t_ns;
   std::vector<ImageData> img_data;

-  // Keep track of internal timestamps for this input
-  bool timing_enabled = false;
-  std::vector<int64_t> tss{};
-  void addTime(const char* /* name */, int64_t custom_ts = INT64_MIN) {
-    if (!timing_enabled) {
-      return;
-    }
-    if (custom_ts != INT64_MIN) {
-      tss.push_back(custom_ts);
-    } else {
-      auto ts = std::chrono::steady_clock::now().time_since_epoch().count();
-      tss.push_back(ts);
-    }
+  timestats stats; //!< Keeps track of internal metrics for this t_ns
+  void addTime(const char* name, int64_t custom_ts = INT64_MIN) {
+    stats.addTime(name, custom_ts);
   }
 };
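`OpticalFlowInput` now owns a `timestats` object instead of the raw `tss` vector, and `addTime` keeps its old signature but simply forwards to it. A minimal sketch of how a caller tags stages under the new layout; the header path and the `tag_input` helper are illustrative, not part of this commit:

```cpp
#include <cstdint>
#include <memory>

#include <basalt/optical_flow/optical_flow.h>  // assumed header for OpticalFlowInput

// Illustrative only: tag two pipeline stages on a fresh input frame through
// the new `stats` member. Stage names are not validated here because
// stats.timing_titles is left unset.
void tag_input(int64_t frame_ts_ns) {
  auto input = std::make_shared<basalt::OpticalFlowInput>();
  input->t_ns = frame_ts_ns;
  input->stats.ts = frame_ts_ns;
  input->stats.timing_enabled = true;       // opt in, as slam_tracker.cpp does below
  input->addTime("frame_ts", frame_ts_ns);  // forwards to stats.addTime(...)
  input->addTime("tracker_received");       // stamped with steady_clock::now()
}
```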
@@ -63,6 +63,23 @@ string pose2str(const pose &p) {
   return str;
 }

+static const vector<string> timing_titles{
+    "frame_ts",
+    "tracker_received",
+    "tracker_pushed",
+    "frames_received", // < Basalt computation starts
+    "opticalflow_produced",
+    "vio_start",
+    "imu_preintegrated",
+    "landmarks_updated",
+    "optimized",
+    "marginalized",
+    "pose_produced", // Basalt computation ends
+    "tracker_consumer_received",
+    "tracker_consumer_pushed",
+    "monado_dequeued",
+};
+
 struct slam_tracker::implementation {
   EIGEN_MAKE_ALIGNED_OPERATOR_NEW
@@ -110,14 +127,20 @@ struct slam_tracker::implementation {
   MargDataSaver::Ptr marg_data_saver;

   // slam_tracker features
-  unordered_set<int> supported_features{F_ADD_CAMERA_CALIBRATION, F_ADD_IMU_CALIBRATION, F_ENABLE_POSE_EXT_TIMING};
+  unordered_set<int> supported_features{
+      F_ADD_CAMERA_CALIBRATION,
+      F_ADD_IMU_CALIBRATION,
+      F_ENABLE_POSE_EXT_TIMING,
+      F_ENABLE_POSE_EXT_FEATURES,
+  };

   // Additional calibration data
   vector<cam_calibration> added_cam_calibs{};
   vector<imu_calibration> added_imu_calibs{};

-  // Pose timing
+  // Enabled pose extensions
   bool pose_timing_enabled = false;
+  bool pose_features_enabled = false;

 public:
   implementation(const string &unified_config) {
@@ -186,10 +209,20 @@ struct slam_tracker::implementation {
   p.timestamp = state->t_ns;
   p.next = nullptr;

-  if (pose_timing_enabled) {
-    auto pose_timing = make_shared<pose_ext_timing>();
-    pose_timing->timestamps = state->input_images->tss;
-    p.next = pose_timing;
+  auto *next = &p.next;
+
+  if (state->input_images->stats.timing_enabled) {
+    pose_ext_timing_data petd = state->input_images->stats;
+    auto pose_timing = make_shared<pose_ext_timing>(petd);
+    *next = pose_timing;
+    next = &pose_timing->next;
+  }
+
+  if (state->input_images->stats.features_enabled) {
+    pose_ext_features_data pefd = state->input_images->stats;
+    auto pose_features = make_shared<pose_ext_features>(pefd);
+    *next = pose_features;
+    next = &pose_features->next;
   }

   return p;
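The extensions are chained through `p.next` so both can be attached to a single sample. For context, a rough consumer-side sketch of walking that chain; it assumes a `pose p` received from the tracker and that `find_pose_extension` returns a `std::shared_ptr<pose_extension>`:

```cpp
#include <memory>

#include <slam_tracker.hpp>

using namespace xrt::auxiliary::tracking::slam;

// Rough consumer-side sketch (e.g. on the Monado side), not code from this
// commit. Assumes find_pose_extension returns std::shared_ptr<pose_extension>.
void inspect_pose(const pose &p) {
  if (auto pe = p.find_pose_extension(pose_ext_type::TIMING)) {
    auto t = std::static_pointer_cast<pose_ext_timing>(pe);
    // t->timing[i] is the steady-clock timestamp (ns) of the stage named
    // (*t->timing_titles)[i], when timing_titles was provided.
    (void)t;
  }
  if (auto pe = p.find_pose_extension(pose_ext_type::FEATURES)) {
    auto f = std::static_pointer_cast<pose_ext_features>(pe);
    for (size_t cam = 0; cam < f->features_per_cam.size(); cam++) {
      for (const auto &kp : f->features_per_cam[cam]) {
        (void)kp;  // kp.id, kp.u, kp.v, kp.depth: one landmark seen by camera `cam`
      }
    }
  }
}
```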
@@ -241,7 +274,7 @@ struct slam_tracker::implementation {
     apply_cam_calibration(c);
   }

-  bool calib_from_monado = added_cam_calibs.size() == 2;
+  bool calib_from_monado = added_cam_calibs.size() == NUM_CAMS;
   bool view_offset_unknown = calib.view_offset(0) == 0 && calib.view_offset(1) == 0;
   if (calib_from_monado || view_offset_unknown) {
     compute_view_offset();
@@ -331,11 +364,24 @@ struct slam_tracker::implementation {
   int i = -1;
   if (s.is_left) {
     partial_frame = make_shared<OpticalFlowInput>();
-    partial_frame->timing_enabled = pose_timing_enabled;
-    partial_frame->addTime("frame_ts", s.timestamp);
-    partial_frame->addTime("tracker_received");
     partial_frame->img_data.resize(NUM_CAMS);
     partial_frame->t_ns = s.timestamp;
+
+    // Initialize stats
+    partial_frame->stats.ts = s.timestamp;
+    if (pose_timing_enabled) {
+      partial_frame->stats.timing_enabled = true;
+      partial_frame->stats.timing.reserve(timing_titles.size());
+      partial_frame->stats.timing_titles = &timing_titles;
+      partial_frame->addTime("frame_ts", s.timestamp);
+      partial_frame->addTime("tracker_received");
+    }
+    if (pose_features_enabled) {
+      partial_frame->stats.features_enabled = true;
+      partial_frame->stats.features_per_cam.resize(NUM_CAMS);
+    }

     i = 0;
   } else {
     ASSERT(partial_frame->t_ns == s.timestamp, "Left and right frame timestamps differ: %ld != %ld",
@@ -384,7 +430,11 @@ struct slam_tracker::implementation {
       shared_ptr<FPARAMS_AIC> casted_params = static_pointer_cast<FPARAMS_AIC>(params);
       add_imu_calibration(*casted_params);
     } else if (feature_id == FID_EPET) {
-      result = enable_pose_ext_timing();
+      shared_ptr<FPARAMS_EPET> casted_params = static_pointer_cast<FPARAMS_EPET>(params);
+      result = enable_pose_ext_timing(*casted_params);
+    } else if (feature_id == FID_EPEF) {
+      shared_ptr<FPARAMS_EPEF> casted_params = static_pointer_cast<FPARAMS_EPEF>(params);
+      enable_pose_ext_features(*casted_params);
     } else {
       return false;
     }
@@ -510,24 +560,12 @@ struct slam_tracker::implementation {
     calib_data_ready.imu = true;
   }

-  shared_ptr<vector<string>> enable_pose_ext_timing() {
-    pose_timing_enabled = true;
-    vector<string> titles{"frame_ts",
-                          "tracker_received",
-                          "tracker_pushed",
-                          "frames_received", // < Basalt computation starts
-                          "opticalflow_produced",
-                          "vio_start",
-                          "imu_preintegrated",
-                          "landmarks_updated",
-                          "optimized",
-                          "marginalized",
-                          "pose_produced", // Basalt computation ends
-                          "tracker_consumer_received",
-                          "tracker_consumer_pushed",
-                          "monado_dequeued"};
-    return make_shared<vector<string>>(titles);
+  shared_ptr<vector<string>> enable_pose_ext_timing(bool enable) {
+    pose_timing_enabled = enable;
+    return make_shared<vector<string>>(timing_titles);
   }

+  void enable_pose_ext_features(bool enable) { pose_features_enabled = enable; }
 };

 slam_tracker::slam_tracker(const string &config_file) { impl = make_unique<slam_tracker::implementation>(config_file); }
@@ -53,6 +53,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <tbb/parallel_reduce.h>

#include <chrono>
#include <slam_tracker.hpp>

namespace basalt {
@@ -486,6 +487,14 @@ bool SqrtKeypointVioEstimator<Scalar_>::measure(
   optimize_and_marg(opt_flow_meas->input_images, num_points_connected,
                     lost_landmaks);

+  const size_t num_cams = opt_flow_meas->observations.size();
+  const bool features_ext = opt_flow_meas->input_images->stats.features_enabled;
+  std::vector<Eigen::aligned_vector<Eigen::Vector4d>> projections{};
+  if (features_ext || out_vis_queue) {
+    projections.resize(num_cams);
+    computeProjections(projections, last_state_t_ns);
+  }
+
   if (out_state_queue) {
     PoseVelBiasStateWithLin p = frame_states.at(last_state_t_ns);
@@ -494,6 +503,21 @@ bool SqrtKeypointVioEstimator<Scalar_>::measure(

     data->input_images = opt_flow_meas->input_images;
     data->input_images->addTime("pose_produced");

+    if (features_ext) {
+      for (size_t i = 0; i < num_cams; i++) {
+        for (const Eigen::Vector4d& v : projections[i]) {
+          using Feature =
+              xrt::auxiliary::tracking::slam::pose_ext_features_data::feature;
+          Feature lm{};
+          lm.id = v.w();
+          lm.u = v.x();
+          lm.v = v.y();
+          lm.depth = v.z();
+          data->input_images->stats.addFeature(i, lm);
+        }
+      }
+    }
     out_state_queue->push(data);
   }
@@ -513,8 +537,7 @@ bool SqrtKeypointVioEstimator<Scalar_>::measure(

     get_current_points(data->points, data->point_ids);

-    data->projections.resize(opt_flow_meas->observations.size());
-    computeProjections(data->projections, last_state_t_ns);
+    data->projections = projections;

     data->opt_flow_res = prev_opt_flow_res[last_state_t_ns];

@@ -143,7 +143,7 @@ size_t last_frame_processed = 0;
 tbb::concurrent_unordered_map<int64_t, int, std::hash<int64_t>> timestamp_to_id;

 std::mutex m;
-std::condition_variable cv;
+std::condition_variable cvar;
 bool step_by_step = false;
 size_t max_frames = 0;

@@ -169,7 +169,7 @@ void feed_images() {

     if (step_by_step) {
       std::unique_lock<std::mutex> lk(m);
-      cv.wait(lk);
+      cvar.wait(lk);
     }

     basalt::OpticalFlowInput::Ptr data(new basalt::OpticalFlowInput);
@@ -829,7 +829,7 @@ bool next_step() {
   if (show_frame < int(vio_dataset->get_image_timestamps().size()) - 1) {
     show_frame = show_frame + 1;
     show_frame.Meta().gui_changed = true;
-    cv.notify_one();
+    cvar.notify_one();
     return true;
   } else {
     return false;
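(The `cv` → `cvar` rename is presumably to avoid ambiguity with OpenCV's `cv` namespace in these translation units.)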
@@ -19,15 +19,17 @@
 #include <opencv2/core/mat.hpp>

 #include <cstdint>
+#include <iostream>
 #include <memory>
 #include <string>
 #include <vector>
+#include <chrono>

 namespace xrt::auxiliary::tracking::slam {

 // For implementation: same as IMPLEMENTATION_VERSION_*
 // For user: expected IMPLEMENTATION_VERSION_*. Should be checked in runtime.
-constexpr int HEADER_VERSION_MAJOR = 3; //!< API Breakages
+constexpr int HEADER_VERSION_MAJOR = 4; //!< API Breakages
 constexpr int HEADER_VERSION_MINOR = 0; //!< Backwards compatible API changes
 constexpr int HEADER_VERSION_PATCH = 0; //!< Backw. comp. .h-implemented changes
@@ -199,7 +201,7 @@ struct cam_calibration {
   double fx, fy; //<! Focal point
   double cx, cy; //<! Principal point
   std::string distortion_model; //!< Models like: none, rt4, rt5, rt8, kb4
-  std::vector<double> distortion; //!< Parameters for the distortion_model
+  std::vector<double> distortion{}; //!< Parameters for the distortion_model
   cv::Matx<double, 4, 4> t_imu_cam; //!< Transformation from IMU to camera
 };

@@ -254,10 +256,17 @@ DEFINE_FEATURE(ADD_IMU_CALIBRATION, AIC, 2, imu_calibration, void)
 /*!
  * Feature ENABLE_POSE_EXT_TIMING
  *
  * Use it after constructor but before `start()`.
  * Enable/disable adding internal timestamps to the estimated poses.
  * Returns a vector with names for the timestamps in `pose_ext_timing`.
  */
-DEFINE_FEATURE(ENABLE_POSE_EXT_TIMING, EPET, 3, void, std::vector<std::string>)
+DEFINE_FEATURE(ENABLE_POSE_EXT_TIMING, EPET, 3, bool, std::vector<std::string>)
+
+/*!
+ * Feature ENABLE_POSE_EXT_FEATURES
+ *
+ * Enable/disable adding feature information to the estimated poses.
+ */
+DEFINE_FEATURE(ENABLE_POSE_EXT_FEATURES, EPEF, 4, bool, void)

 /*
  * Pose extensions
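A hedged sketch of how a client would turn both extensions on through the feature mechanism; it assumes the tracker exposes the usual `use_feature(feature_id, params, result)` entry point and relies only on the `F_*`/`FPARAMS_*` names generated by `DEFINE_FEATURE`:

```cpp
#include <memory>
#include <string>
#include <vector>

#include <slam_tracker.hpp>

using namespace xrt::auxiliary::tracking::slam;

// Sketch, not part of the header: enable both pose extensions after
// construction but before start(). Assumes the tracker exposes
// use_feature(int feature_id, const std::shared_ptr<void> &params,
//             std::shared_ptr<void> &result).
void enable_pose_extensions(slam_tracker &t) {
  std::shared_ptr<void> result;

  // ENABLE_POSE_EXT_TIMING: params is a bool, result is the list of stage names.
  t.use_feature(F_ENABLE_POSE_EXT_TIMING, std::make_shared<FPARAMS_EPET>(true), result);
  auto titles = std::static_pointer_cast<std::vector<std::string>>(result);
  (void)titles;  // e.g. titles->front() == "frame_ts"

  // ENABLE_POSE_EXT_FEATURES: params is a bool, there is no result.
  t.use_feature(F_ENABLE_POSE_EXT_FEATURES, std::make_shared<FPARAMS_EPEF>(true), result);
}
```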
@@ -272,6 +281,7 @@ DEFINE_FEATURE(ENABLE_POSE_EXT_TIMING, EPET, 3, void, std::vector<std::string>)
 enum class pose_ext_type : int {
   UNDEFINED = 0,
   TIMING = 1,
+  FEATURES = 2,
 };

 struct pose_extension {
@@ -290,12 +300,79 @@ pose::find_pose_extension(pose_ext_type required_type) const {
   return pe;
 }

-struct pose_ext_timing : pose_extension {
-  //! Internal pipeline timestamps of interest when generating the pose. In
-  //! steady clock ns. Must have the same number of elements in the same run.
-  std::vector<std::int64_t> timestamps{};
+// Timing pose extension
+struct pose_ext_timing_data {
+  //! Internal pipeline stage timestamps of interest when generating the pose.
+  //! In steady clock ns. Must have the same number of elements in the same run.
+  std::vector<std::int64_t> timing{};
+
+  //! Names of each timing stage. Should point to static memory.
+  const std::vector<std::string> *timing_titles = nullptr;
+};
+
+struct pose_ext_timing : pose_extension, pose_ext_timing_data {
+  pose_ext_timing() : pose_extension{pose_ext_type::TIMING} {}
+  pose_ext_timing(const pose_ext_timing_data &petd)
+      : pose_extension{pose_ext_type::TIMING}, pose_ext_timing_data{petd} {}
+};
+
+// Features pose extension
+struct pose_ext_features_data {
+  struct feature {
+    std::int64_t id;
+    float u;
+    float v;
+    float depth;
+  };
+
+  std::vector<std::vector<feature>> features_per_cam{};
+};
+
+struct pose_ext_features : pose_extension, pose_ext_features_data {
+  pose_ext_features() : pose_extension{pose_ext_type::FEATURES} {}
+  pose_ext_features(const pose_ext_features_data &pefd)
+      : pose_extension{pose_ext_type::FEATURES}, pose_ext_features_data{pefd} {}
+};
+
+/*!
+ * Utility object to keep track of different stats for a particular timestamp.
+ * Stats usually correspond with a particular pose extension.
+ */
+struct timestats : pose_ext_timing_data, pose_ext_features_data {
+  using ptr = std::shared_ptr<timestats>;
+
+  std::int64_t ts = -1;
+  bool timing_enabled = false;
+  bool features_enabled = false;
+
+  void addTime(const char *name, int64_t ts = INT64_MIN) {
+    if (!timing_enabled) {
+      return;
+    }
+    if (timing_titles) {
+      std::string expected = timing_titles->at(timing.size());
+      if (expected != name) {
+        std::cout << "Invalid timing stage\n";
+        std::cout << "expected: " << expected;
+        std::cout << ", got: " << name << std::endl;
+        exit(EXIT_FAILURE);
+      }
+    }
+    if (ts == INT64_MIN) {
+      ts = std::chrono::steady_clock::now().time_since_epoch().count();
+    }
+    timing.push_back(ts);
+  }
+
+  void addFeature(size_t cam, const feature &f) {
+    if (!features_enabled) {
+      return;
+    }
+    if (cam >= features_per_cam.size()) {
+      features_per_cam.resize(cam + 1);
+    }
+    features_per_cam.at(cam).push_back(f);
+  }
+};

 } // namespace xrt::auxiliary::tracking::slam
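For illustration, a consumer that receives the timing extension can pair each timestamp with its title and compute per-stage latencies. A minimal sketch assuming a `pose_ext_timing` already populated by a tracker with timing enabled (not code from this commit):

```cpp
#include <cstdio>

#include <slam_tracker.hpp>

using xrt::auxiliary::tracking::slam::pose_ext_timing;

// Print how long each pipeline stage took, in milliseconds.
// Assumes pet.timing and pet.timing_titles were filled by the tracker.
void print_stage_latencies(const pose_ext_timing &pet) {
  if (pet.timing_titles == nullptr || pet.timing.size() < 2) return;
  for (size_t i = 1; i < pet.timing.size(); i++) {
    double ms = double(pet.timing[i] - pet.timing[i - 1]) / 1e6;  // ns -> ms
    std::printf("%s -> %s: %.2f ms\n", pet.timing_titles->at(i - 1).c_str(),
                pet.timing_titles->at(i).c_str(), ms);
  }
}
```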