Added Cholmod support

Vladyslav Usenko 2019-11-08 00:42:02 +08:00
parent c23fec1e7b
commit ff2ed88a01
3 changed files with 46 additions and 26 deletions

View File

@@ -113,13 +113,18 @@ if(APPLE)
   # See: https://github.com/openMVG/openMVG/issues/1349#issuecomment-401492811
   set(CMAKE_FIND_FRAMEWORK LAST)
 
-  # use brewed llvm's libc++
-  # Note: the suffix "/../v1" for the include path is needed to work around a recent cmake issue:
-  # https://gitlab.kitware.com/cmake/cmake/issues/19251#note_571030
-  include_directories("/usr/local/opt/llvm/include/c++/v1/../v1")
-  link_directories("/usr/local/opt/llvm/lib")
-  add_compile_options("-nostdinc++")
-  set(STD_CXX_FS c++fs)
+  if(CMAKE_SYSTEM_VERSION VERSION_LESS 19.0.0)
+    # use brewed llvm's libc++
+    # Note: the suffix "/../v1" for the include path is needed to work around a recent cmake issue:
+    # https://gitlab.kitware.com/cmake/cmake/issues/19251#note_571030
+    include_directories("/usr/local/opt/llvm/include/c++/v1/../v1")
+    link_directories("/usr/local/opt/llvm/lib")
+    add_compile_options("-nostdinc++")
+    set(STD_CXX_FS c++fs)
+  endif()
 
   if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
     message(STATUS "Detected macOS with non-Apple clang")

View File

@@ -33,8 +33,7 @@ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
 
-#ifndef BASALT_ACCUMULATOR_H
-#define BASALT_ACCUMULATOR_H
+#pragma once
 
 #include <Eigen/Dense>
 #include <Eigen/Sparse>
@@ -45,6 +44,20 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include <basalt/utils/assert.h>
 
+#if defined(BASALT_USE_CHOLMOD)
+
+#include <Eigen/CholmodSupport>
+
+template <class T>
+using SparseLLT = Eigen::CholmodSupernodalLLT<T>;
+
+#else
+
+template <class T>
+using SparseLLT = Eigen::SimplicialLDLT<T>;
+
+#endif
+
 namespace basalt {
 
 template <typename Scalar = double>
@@ -195,7 +208,7 @@ class SparseHashAccumulator {
       cg.compute(sm);
       res = cg.solve(b);
     } else {
-      Eigen::SimplicialLDLT<SparseMatrix> chol(sm);
+      SparseLLT<SparseMatrix> chol(sm);
       res = chol.solve(b);
     }
@@ -261,5 +274,3 @@ class SparseHashAccumulator {
 };
 
 }  // namespace basalt
-
-#endif
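
For context, a minimal, self-contained sketch (not part of this commit) of how the new SparseLLT alias behaves: with BASALT_USE_CHOLMOD defined it resolves to Eigen::CholmodSupernodalLLT (supernodal Cholesky backed by SuiteSparse's CHOLMOD), otherwise it falls back to Eigen::SimplicialLDLT, and both expose the same construct-and-solve interface used in the accumulator hunk above. The test matrix below is purely illustrative.

// Illustrative only: a tiny SPD system solved through the SparseLLT alias.
// (BASALT_USE_CHOLMOD and SparseLLT match the diff; the matrix data is made up.)
#include <Eigen/Sparse>

#if defined(BASALT_USE_CHOLMOD)
#include <Eigen/CholmodSupport>
template <class T>
using SparseLLT = Eigen::CholmodSupernodalLLT<T>;
#else
template <class T>
using SparseLLT = Eigen::SimplicialLDLT<T>;
#endif

int main() {
  using SparseMatrix = Eigen::SparseMatrix<double>;

  // Build a small symmetric positive-definite matrix H and right-hand side b.
  SparseMatrix H(2, 2);
  H.insert(0, 0) = 4.0;
  H.insert(0, 1) = 1.0;
  H.insert(1, 0) = 1.0;
  H.insert(1, 1) = 3.0;
  H.makeCompressed();

  Eigen::VectorXd b(2);
  b << 1.0, 2.0;

  // Same pattern as the accumulator's Cholesky branch: construct the solver
  // from the sparse system matrix, then solve for the increment.
  SparseLLT<SparseMatrix> chol(H);
  Eigen::VectorXd x = chol.solve(b);

  return (chol.info() == Eigen::Success && x.size() == 2) ? 0 : 1;
}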

View File

@@ -87,10 +87,10 @@ class SplineOptimization {
 
   typedef Se3Spline<N, Scalar> SplineT;
 
-  SplineOptimization(int64_t dt_ns = 1e7)
+  SplineOptimization(int64_t dt_ns = 1e7, double init_lambda = 1e-12)
       : pose_var(1e-4),
         mocap_initialized(false),
-        lambda(1e-12),
+        lambda(init_lambda),
         min_lambda(1e-18),
         max_lambda(100),
         lambda_vee(2),
@@ -367,7 +367,8 @@ class SplineOptimization {
   bool optimize(bool use_intr, bool use_poses, bool use_april_corners,
                 bool opt_cam_time_offset, bool opt_imu_scale, bool use_mocap,
                 double huber_thresh, double stop_thresh, double& error,
-                int& num_points, double& reprojection_error) {
+                int& num_points, double& reprojection_error,
+                bool print_info = true) {
     // std::cerr << "optimize num_knots " << num_knots << std::endl;
 
     ccd.opt_intrinsics = use_intr;
@@ -419,8 +420,9 @@ class SplineOptimization {
       num_points = lopt.num_points;
       reprojection_error = lopt.reprojection_error;
 
-      std::cout << "[LINEARIZE] Error: " << lopt.error << " num points "
-                << lopt.num_points << std::endl;
+      if (print_info)
+        std::cout << "[LINEARIZE] Error: " << lopt.error << " num points "
+                  << lopt.num_points << std::endl;
 
       lopt.accum.setup_solver();
       Eigen::VectorXd Hdiag = lopt.accum.Hdiagonal();
@@ -473,10 +475,11 @@ class SplineOptimization {
       double step_quality = f_diff / l_diff;
 
       if (step_quality < 0) {
-        std::cout << "\t[REJECTED] lambda:" << lambda
-                  << " step_quality: " << step_quality
-                  << " max_inc: " << max_inc << " Error: " << eopt.error
-                  << " num points " << eopt.num_points << std::endl;
+        if (print_info)
+          std::cout << "\t[REJECTED] lambda:" << lambda
+                    << " step_quality: " << step_quality
+                    << " max_inc: " << max_inc << " Error: " << eopt.error
+                    << " num points " << eopt.num_points << std::endl;
 
         lambda = std::min(max_lambda, lambda_vee * lambda);
         lambda_vee *= 2;
@@ -486,10 +489,11 @@ class SplineOptimization {
 
         g = g_backup;
       } else {
-        std::cout << "\t[ACCEPTED] lambda:" << lambda
-                  << " step_quality: " << step_quality
-                  << " max_inc: " << max_inc << " Error: " << eopt.error
-                  << " num points " << eopt.num_points << std::endl;
+        if (print_info)
+          std::cout << "\t[ACCEPTED] lambda:" << lambda
+                    << " step_quality: " << step_quality
+                    << " max_inc: " << max_inc << " Error: " << eopt.error
+                    << " num points " << eopt.num_points << std::endl;
 
         lambda = std::max(
             min_lambda,
@@ -506,7 +510,7 @@ class SplineOptimization {
       max_iter--;
     }
 
-    if (converged) {
+    if (converged && print_info) {
       std::cout << "[CONVERGED]" << std::endl;
     }
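
A hypothetical caller-side sketch (not part of this commit) of how the two new knobs combine: init_lambda sets the initial Levenberg-Marquardt damping at construction, and print_info = false silences the [LINEARIZE]/[ACCEPTED]/[REJECTED]/[CONVERGED] logging that was previously unconditional. Optimizer stands in for a concrete SplineOptimization<N, Scalar>; the argument order mirrors the optimize() signature in the diff above, while the flag values and helper name are made up for illustration.

// Hypothetical usage sketch; only the members touched by this commit are assumed.
template <class Optimizer>
bool run_quiet_calibration(Optimizer& opt, double huber_thresh,
                           double stop_thresh) {
  double error = 0;
  int num_points = 0;
  double reprojection_error = 0;

  // print_info = false: suppress per-iteration console output.
  return opt.optimize(/*use_intr=*/true, /*use_poses=*/true,
                      /*use_april_corners=*/true,
                      /*opt_cam_time_offset=*/false, /*opt_imu_scale=*/false,
                      /*use_mocap=*/false, huber_thresh, stop_thresh, error,
                      num_points, reprojection_error,
                      /*print_info=*/false);
}

// Construction with a non-default initial damping would look roughly like this
// (the template arguments are an assumption, not shown in this diff):
//   SplineOptimization<5, double> opt(/*dt_ns=*/int64_t(1e7),
//                                     /*init_lambda=*/1e-10);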