Added Cholmod support

Vladyslav Usenko 2019-11-08 00:42:02 +08:00
parent c23fec1e7b
commit ff2ed88a01
3 changed files with 46 additions and 26 deletions

View File

@@ -113,6 +113,7 @@ if(APPLE)
   # See: https://github.com/openMVG/openMVG/issues/1349#issuecomment-401492811
   set(CMAKE_FIND_FRAMEWORK LAST)
 
+  if(CMAKE_SYSTEM_VERSION VERSION_LESS 19.0.0)
   # use brewed llvm's libc++
   # Note: the suffix "/../v1" for the include path is needed to work around a recent cmake issue:
   # https://gitlab.kitware.com/cmake/cmake/issues/19251#note_571030
@@ -120,6 +121,10 @@ if(APPLE)
   link_directories("/usr/local/opt/llvm/lib")
   add_compile_options("-nostdinc++")
   set(STD_CXX_FS c++fs)
+  endif()
 
 if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
   message(STATUS "Detected macOS with non-Apple clang")

View File

@@ -33,8 +33,7 @@ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
 
-#ifndef BASALT_ACCUMULATOR_H
-#define BASALT_ACCUMULATOR_H
+#pragma once
 
 #include <Eigen/Dense>
 #include <Eigen/Sparse>
@@ -45,6 +44,20 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #include <basalt/utils/assert.h>
 
+#if defined(BASALT_USE_CHOLMOD)
+
+#include <Eigen/CholmodSupport>
+
+template <class T>
+using SparseLLT = Eigen::CholmodSupernodalLLT<T>;
+
+#else
+
+template <class T>
+using SparseLLT = Eigen::SimplicialLDLT<T>;
+
+#endif
+
 namespace basalt {
 
 template <typename Scalar = double>
@@ -195,7 +208,7 @@ class SparseHashAccumulator {
       cg.compute(sm);
       res = cg.solve(b);
     } else {
-      Eigen::SimplicialLDLT<SparseMatrix> chol(sm);
+      SparseLLT<SparseMatrix> chol(sm);
       res = chol.solve(b);
     }
@@ -261,5 +274,3 @@ class SparseHashAccumulator {
 };
 
 }  // namespace basalt
-
-#endif
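
Note: the SparseLLT alias gives both backends the same factorize-then-solve interface used in the accumulator's solve path. A minimal standalone sketch of this compile-time switch follows; the toy 2x2 system and the main() driver are illustrative only, not from this commit, and the Cholmod path additionally assumes CHOLMOD is installed and linked when building with -DBASALT_USE_CHOLMOD.

#include <Eigen/Dense>
#include <Eigen/Sparse>

#if defined(BASALT_USE_CHOLMOD)
#include <Eigen/CholmodSupport>
template <class T>
using SparseLLT = Eigen::CholmodSupernodalLLT<T>;
#else
template <class T>
using SparseLLT = Eigen::SimplicialLDLT<T>;
#endif

int main() {
  using SparseMatrix = Eigen::SparseMatrix<double>;

  // Tiny symmetric positive-definite system A x = b, for illustration only.
  SparseMatrix A(2, 2);
  A.insert(0, 0) = 4.0;
  A.insert(1, 1) = 3.0;
  A.makeCompressed();

  Eigen::VectorXd b(2);
  b << 1.0, 2.0;

  // Same pattern as the accumulator's solve path: factorize, then solve.
  // Which backend runs (CHOLMOD supernodal vs. Eigen's built-in simplicial
  // LDLT) is decided at compile time by BASALT_USE_CHOLMOD.
  SparseLLT<SparseMatrix> chol(A);
  Eigen::VectorXd x = chol.solve(b);

  return chol.info() == Eigen::Success ? 0 : 1;
}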

View File

@@ -87,10 +87,10 @@ class SplineOptimization {
   typedef Se3Spline<N, Scalar> SplineT;
 
-  SplineOptimization(int64_t dt_ns = 1e7)
+  SplineOptimization(int64_t dt_ns = 1e7, double init_lambda = 1e-12)
       : pose_var(1e-4),
         mocap_initialized(false),
-        lambda(1e-12),
+        lambda(init_lambda),
         min_lambda(1e-18),
         max_lambda(100),
         lambda_vee(2),
@@ -367,7 +367,8 @@ class SplineOptimization {
   bool optimize(bool use_intr, bool use_poses, bool use_april_corners,
                 bool opt_cam_time_offset, bool opt_imu_scale, bool use_mocap,
                 double huber_thresh, double stop_thresh, double& error,
-                int& num_points, double& reprojection_error) {
+                int& num_points, double& reprojection_error,
+                bool print_info = true) {
     // std::cerr << "optimize num_knots " << num_knots << std::endl;
 
     ccd.opt_intrinsics = use_intr;
@@ -419,6 +420,7 @@ class SplineOptimization {
     num_points = lopt.num_points;
     reprojection_error = lopt.reprojection_error;
 
+    if (print_info)
       std::cout << "[LINEARIZE] Error: " << lopt.error << " num points "
                 << lopt.num_points << std::endl;
@@ -473,6 +475,7 @@ class SplineOptimization {
       double step_quality = f_diff / l_diff;
 
       if (step_quality < 0) {
+        if (print_info)
           std::cout << "\t[REJECTED] lambda:" << lambda
                     << " step_quality: " << step_quality
                     << " max_inc: " << max_inc << " Error: " << eopt.error
@@ -486,6 +489,7 @@ class SplineOptimization {
         g = g_backup;
       } else {
+        if (print_info)
           std::cout << "\t[ACCEPTED] lambda:" << lambda
                     << " step_quality: " << step_quality
                     << " max_inc: " << max_inc << " Error: " << eopt.error
@@ -506,7 +510,7 @@ class SplineOptimization {
       max_iter--;
     }
 
-    if (converged) {
+    if (converged && print_info) {
       std::cout << "[CONVERGED]" << std::endl;
     }
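
Note: for reference, a hedged sketch of how a caller might use the two new parameters, init_lambda and print_info. The header path, the <5, double> template arguments, and every value below are assumptions for illustration, not part of this commit.

// Hedged call-site sketch: the include path, template arguments, and all
// parameter values are illustrative assumptions, not taken from the diff.
#include <basalt/optimization/spline_optimize.h>

int main() {
  // New second constructor argument overrides the initial LM damping,
  // which previously was hard-coded to 1e-12.
  basalt::SplineOptimization<5, double> spline_opt(/*dt_ns=*/10000000,
                                                   /*init_lambda=*/1e-10);

  double error = 0, reprojection_error = 0;
  int num_points = 0;

  // print_info defaults to true, so existing call sites keep their console
  // output; passing false suppresses the [LINEARIZE], [ACCEPTED]/[REJECTED]
  // and [CONVERGED] messages, e.g. when optimize() runs inside a loop.
  spline_opt.optimize(
      /*use_intr=*/true, /*use_poses=*/true, /*use_april_corners=*/true,
      /*opt_cam_time_offset=*/false, /*opt_imu_scale=*/false,
      /*use_mocap=*/true, /*huber_thresh=*/0.5, /*stop_thresh=*/1e-8, error,
      num_points, reprojection_error, /*print_info=*/false);

  return 0;
}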