Ceres example applications for various forms of the ICP problem
The ICP method mainly solves the problem of 3D-3D motion estimation from spatial point clouds.
Known: the two point clouds perceived at times $t_1$ and $t_2$ (both expressed in the sensor coordinate system), and the pose $p_1$ of the sensor at time $t_1$ in the global coordinate system.
To solve: the pose change of the sensor at time $t_2$ relative to time $t_1$.
Usually, in point cloud registration, the point cloud perceived at time $t_2$ is called the source, and the point cloud perceived at time $t_1$ is called the target.
The basic steps for solving an ICP problem are as follows (the underlying objective is written out after the list):
- Determine the initial value $R_0$, $t_0$ of the sensor pose change between times $t_1$ and $t_2$. In the real-vehicle case, this is the dead-reckoning (DR) value computed by fusing the IMU and the wheel odometer.
- Transform the point cloud at time $t_2$, i.e. the source point cloud, by the initial pose change into the coordinate system of the target point cloud at time $t_1$.
- Build a KD-tree from the target point cloud, then for each coordinate-transformed source point find the nearest point in the target KD-tree to complete the data association.
- Construct the cost function and optimize $R$ and $t$. The cost function takes many forms, including the point-to-point Euclidean distance, the point-to-line perpendicular distance, and the vector Euclidean distance plus included angle. When using detection information with orientation (e.g. a detected parking spot), the last form simplifies each detection into a directed point for optimization.
- Iterate. Take the optimized $R$ and $t$ from step 4 as the pose transformation in step 1, and repeat steps 2, 3, and 4 until the change in $R$ and $t$ between consecutive iterations is below a threshold.
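In its most basic point-to-point form, each ICP iteration solves a standard least-squares problem (a textbook formulation, stated here for context):

$$
\min_{R,\,t} \sum_i \left\| R\,p_i^{\mathrm{src}} + t - q_i^{\mathrm{tgt}} \right\|^2
$$

where $q_i^{\mathrm{tgt}}$ is the target point currently associated with the source point $p_i^{\mathrm{src}}$. Because the associations change after every solve, the optimization must be wrapped in the outer iteration of step 5.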
Using a KD-tree to solve the data-association problem was introduced in another blog post:
Application of the KD-tree library nanoflann
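As a rough sketch of what the `Association()` step can look like with nanoflann (using the classic v1.x API; the `Point2D` type, the gating distance, and the `-1` no-match convention are illustrative assumptions mirroring the code below):

```cpp
#include <nanoflann.hpp>
#include <cstddef>
#include <utility>
#include <vector>

// Hypothetical minimal point type standing in for the CloudPoint used later.
struct Point2D { double x, y; };

// Adaptor exposing a std::vector<Point2D> to nanoflann.
struct PointCloudAdaptor
{
    const std::vector<Point2D>& pts;
    explicit PointCloudAdaptor(const std::vector<Point2D>& p) : pts(p) {}

    inline size_t kdtree_get_point_count() const { return pts.size(); }
    inline double kdtree_get_pt(const size_t idx, const size_t dim) const
    {
        return dim == 0 ? pts[idx].x : pts[idx].y;
    }
    template <class BBOX>
    bool kdtree_get_bbox(BBOX&) const { return false; }
};

using KDTree2D = nanoflann::KDTreeSingleIndexAdaptor<
    nanoflann::L2_Simple_Adaptor<double, PointCloudAdaptor>,
    PointCloudAdaptor, 2 /* dimensionality */>;

// For each (already transformed) source point, find its nearest target point;
// store -1 when the nearest neighbour is farther than max_dist.
std::vector<std::pair<int, double>> Associate(const std::vector<Point2D>& target,
                                              const std::vector<Point2D>& source,
                                              double max_dist)
{
    PointCloudAdaptor adaptor(target);
    KDTree2D index(2, adaptor, nanoflann::KDTreeSingleIndexAdaptorParams(10 /* leaf size */));
    index.buildIndex();

    std::vector<std::pair<int, double>> association;
    association.reserve(source.size());
    for (const Point2D& p : source)
    {
        const double query[2] = {p.x, p.y};
        size_t nearest_index = 0;
        double sq_dist = 0.0;
        index.knnSearch(query, 1, &nearest_index, &sq_dist);
        if (sq_dist <= max_dist * max_dist)
            association.emplace_back(static_cast<int>(nearest_index), sq_dist);
        else
            association.emplace_back(-1, sq_dist);  // no valid match
    }
    return association;
}
```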
The following code is an example of using Ceres to solve the optimization problem:
```cpp
void ScanAlign::Align(LocalizedRangeScan* pSourceScan, LocalizedRangeScan* pTargetScan,
                      Pose2& rBestpose, Eigen::Vector3d& rDelta)
{
    SetSource(pSourceScan);
    SetTarget(pTargetScan);

    // Construct the pose transform from src to world.
    Eigen::Vector3d ypr_w_src(pSourceScan->GetPredictPose().GetYaw_Rad(), 0, 0);
    Eigen::Vector3d t_w_src(pSourceScan->GetPredictPose().GetX(),
                            pSourceScan->GetPredictPose().GetY(), 0);
    common::PoseTransformation pose_w_src(ypr_w_src, t_w_src);

    // Construct the pose transform from target to world.
    Eigen::Vector3d ypr_w_target(pTargetScan->GetCorrectedPose().GetYaw_Rad(), 0, 0);
    Eigen::Vector3d t_w_target(pTargetScan->GetCorrectedPose().GetX(),
                               pTargetScan->GetCorrectedPose().GetY(), 0);
    common::PoseTransformation pose_w_target(ypr_w_target, t_w_target);

    // Src frame relative to target frame: the initial (raw) state.
    current_transform_ = pose_w_src.GetTransform(pose_w_target);
    last_transform_ = current_transform_;

    for (size_t opti_counter = 0; opti_counter < 10; ++opti_counter)
    {
        // Data association: find the nearest target point for each source
        // point in the target coordinate system.
        Association();

        // Build the problem.
        ceres::Problem problem;
        ceres::Solver::Options options;
        options.linear_solver_type = ceres::SPARSE_NORMAL_CHOLESKY;
        options.minimizer_progress_to_stdout = false;
        // options.max_solver_time_in_seconds = SOLVER_TIME * 3;
        options.max_num_iterations = 100;
        ceres::Solver::Summary summary;
        ceres::LossFunction* loss_function = new ceres::HuberLoss(0.1);

        // For this 2D problem only the yaw angle is optimized, so an angle
        // local parameterization is used; a 3D problem using quaternions
        // would use a quaternion parameterization instead.
        ceres::LocalParameterization* angle_local_parameterization =
            ceres::slam2d::AngleLocalParameterization::Create();

        // Add parameter blocks.
        problem.AddParameterBlock(current_transform_.ypr_.data(), 1, angle_local_parameterization);
        problem.AddParameterBlock(current_transform_.t_.data(), 2);
        double* t_array = current_transform_.t_.data();
        double* euler_array = current_transform_.ypr_.data();

        for (size_t src_index = 0; src_index < m_vAssociation.size(); src_index++)
        {
            int target_index = m_vAssociation.at(src_index).first;
            if (target_index == -1)  // no match was found for this source point
                continue;

            CloudPoint src_pt = m_stCloudPoints_src.at(src_index);
            CloudPoint target_pt = m_stCloudPoints_target.at(target_index);

            if (src_pt.type == 1)  // parking site
            {
                // Point-to-point constraint.
                Eigen::DiagonalMatrix<double, 2> cov(0.1, 0.1);
                Eigen::Matrix2d sqrt_information = cov;
                sqrt_information = sqrt_information.inverse().llt().matrixL();
                double weight = 1.0;
                ceres::CostFunction* cost_function = ceres::slam2d::PP2dErrorTerm::Create(
                    src_pt.x, src_pt.y, target_pt.x, target_pt.y, sqrt_information, weight);
                problem.AddResidualBlock(cost_function, loss_function, euler_array, t_array);
            }
            else if (src_pt.type == 2)  // lane line
            {
                // Point-to-line constraint.
                Eigen::Matrix<double, 1, 1> sqrt_information;
                sqrt_information(0, 0) = std::sqrt(1 / 0.1);
                double weight = 1.0;
                ceres::CostFunction* cost_function = ceres::slam2d::PL2dErrorTerm::Create(
                    src_pt.x, src_pt.y, target_pt.x, target_pt.y,
                    target_pt.norm_x, target_pt.norm_y, sqrt_information, weight);
                problem.AddResidualBlock(cost_function, loss_function, euler_array, t_array);
            }
        }  // end adding residual blocks

        // Solve the problem.
        ceres::Solve(options, &problem, &summary);

        // Update the current transformation; ypr and t were updated by the optimizer.
        current_transform_.UpdateFromEulerAngle_Translation();

        // Check whether we have converged.
        common::PoseTransformation estimation_change =
            current_transform_.GetTransform(last_transform_);
        last_transform_ = current_transform_;
        if (estimation_change.t_.norm() < 1e-5 && estimation_change.ypr_.norm() < 1e-5)
        {
            break;
        }
    }  // end iteration (association and optimization cycles)

    // We need to update the pose info object as well.
    pose_w_src = pose_w_target * current_transform_;
    rBestpose.SetX(pose_w_src.t_[0]);
    rBestpose.SetY(pose_w_src.t_[1]);
    rBestpose.SetYaw_Rad(pose_w_src.ypr_[0]);
    rDelta[0] = current_transform_.t_[0];    // delta x
    rDelta[1] = current_transform_.t_[1];    // delta y
    rDelta[2] = current_transform_.ypr_[0];  // delta yaw

    // Print the ICP result (kept for debugging).
    // std::cout << "icp_target_src : x = " << current_transform_.t_[0]
    //           << " y = " << current_transform_.t_[1]
    //           << " z = " << current_transform_.t_[2]
    //           << " yaw = " << current_transform_.ypr_[0]
    //           << " pitch = " << current_transform_.ypr_[1]
    //           << " roll = " << current_transform_.ypr_[2] << std::endl << std::endl;
}
```
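The `ceres::slam2d::AngleLocalParameterization` used above is not shown in this post. Helpers like it are commonly adapted from the Ceres `pose_graph_2d` example, where the plus operation simply adds the increment and re-normalizes the angle; a sketch along those lines:

```cpp
#include <cmath>
#include <ceres/ceres.h>

// Normalize an angle to the range [-pi, pi).
template <typename T>
inline T NormalizeAngle(const T& angle_radians)
{
    // ceres::floor is specialized for both double and Jet types.
    T two_pi(2.0 * M_PI);
    return angle_radians - two_pi * ceres::floor((angle_radians + T(M_PI)) / two_pi);
}

// Plus(theta, delta) = NormalizeAngle(theta + delta): the update stays on the
// circle instead of drifting outside [-pi, pi).
class AngleLocalParameterization
{
public:
    template <typename T>
    bool operator()(const T* theta_radians, const T* delta_theta_radians,
                    T* theta_radians_plus_delta) const
    {
        *theta_radians_plus_delta = NormalizeAngle(*theta_radians + *delta_theta_radians);
        return true;
    }

    static ceres::LocalParameterization* Create()
    {
        return new ceres::AutoDiffLocalParameterization<AngleLocalParameterization, 1, 1>;
    }
};
```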
For the different cost calculation methods, I wrote three cost functors.
The first is the point-to-point Euclidean distance, which is also the most basic form of ICP.
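Written out, the residual that this functor evaluates for a single associated pair is

$$
r = \Lambda^{1/2}\, w \left( R(\theta)\, p^{\mathrm{src}} + t - q^{\mathrm{tgt}} \right),
\qquad
R(\theta) = \begin{pmatrix} \cos\theta & -\sin\theta \\ \sin\theta & \cos\theta \end{pmatrix},
$$

where $\Lambda^{1/2}$ is the square-root information matrix and $w$ the scalar weight.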
```cpp
// Point-to-point error term.
struct PP2dErrorTerm
{
public:
    PP2dErrorTerm(double obs_src_x, double obs_src_y,
                  double obs_target_x, double obs_target_y,
                  Eigen::Matrix2d sqrt_information, double weight = 1.0)
        : obs_src_x_(obs_src_x), obs_src_y_(obs_src_y),
          obs_target_x_(obs_target_x), obs_target_y_(obs_target_y),
          sqrt_information_(sqrt_information), weight_(weight) {}

    template <typename T>
    bool operator()(const T* yaw, const T* t_param, T* residuals) const
    {
        const Eigen::Matrix<T, 2, 1> t(t_param[0], t_param[1]);
        const Eigen::Matrix<T, 2, 2> R = ceres::slam2d::RotationMatrix2D(*yaw);
        const Eigen::Vector2d obs_src_t(obs_src_x_, obs_src_y_);
        const Eigen::Vector2d obs_target_t(obs_target_x_, obs_target_y_);

        // Residual: transformed source point minus its associated target point.
        Eigen::Map<Eigen::Matrix<T, 2, 1>> residuals_map(residuals);
        residuals_map.template head<2>() =
            R * obs_src_t.cast<T>() + t - obs_target_t.cast<T>();

        // Weight these constraints.
        residuals_map = residuals_map * weight_;

        // Scale the residuals by the square-root information matrix to account
        // for the measurement uncertainty.
        residuals_map = sqrt_information_.template cast<T>() * residuals_map;
        return true;
    }

    static ceres::CostFunction* Create(double obs_src_x, double obs_src_y,
                                       double obs_target_x, double obs_target_y,
                                       Eigen::Matrix2d sqrt_information, double weight = 1.0)
    {
        // 2 residuals, 1 yaw parameter, 2 translation parameters.
        return new ceres::AutoDiffCostFunction<PP2dErrorTerm, 2, 1, 2>(
            new PP2dErrorTerm(obs_src_x, obs_src_y, obs_target_x, obs_target_y,
                              sqrt_information, weight));
    }

    double obs_src_x_, obs_src_y_;
    double obs_target_x_, obs_target_y_;
    Eigen::Matrix2d sqrt_information_;
    double weight_;
};
```
The second is the perpendicular point-to-line distance. My approach is to first compute a normal vector for each point in the target point cloud, and then, after data association, use the normal vector to compute the point-to-line distance cheaply. The normals are computed with PCA (principal component analysis), which will be covered in a follow-up blog post.
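The residual in this form is just the projection of the point-to-point error onto the target normal, so the expensive part is getting the normals. A minimal sketch of 2D normal estimation by PCA (the function name and the source of the neighbours are my own assumptions; the follow-up post will treat this properly):

```cpp
#include <Eigen/Dense>
#include <vector>

// Estimate the 2D normal of a point from its neighbourhood by PCA.
// The neighbours (assumed non-empty) would come from the same KD-tree used
// for data association.
Eigen::Vector2d EstimateNormal2D(const std::vector<Eigen::Vector2d>& neighbors)
{
    // Mean of the neighbourhood.
    Eigen::Vector2d mean = Eigen::Vector2d::Zero();
    for (const auto& p : neighbors) mean += p;
    mean /= static_cast<double>(neighbors.size());

    // 2x2 covariance of the neighbourhood.
    Eigen::Matrix2d cov = Eigen::Matrix2d::Zero();
    for (const auto& p : neighbors)
    {
        const Eigen::Vector2d d = p - mean;
        cov += d * d.transpose();
    }

    // The eigenvector of the smallest eigenvalue is perpendicular to the local
    // line direction, i.e. it is the normal (eigenvalues sorted ascending).
    Eigen::SelfAdjointEigenSolver<Eigen::Matrix2d> solver(cov);
    return solver.eigenvectors().col(0).normalized();
}
```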
```cpp
// Point-to-line error term.
struct PL2dErrorTerm
{
public:
    PL2dErrorTerm(double obs_src_x, double obs_src_y,
                  double obs_target_x, double obs_target_y,
                  double obs_target_norm_x, double obs_target_norm_y,
                  Eigen::Matrix<double, 1, 1> sqrt_information, double weight = 1.0)
        : obs_src_x_(obs_src_x), obs_src_y_(obs_src_y),
          obs_target_x_(obs_target_x), obs_target_y_(obs_target_y),
          obs_target_norm_x_(obs_target_norm_x), obs_target_norm_y_(obs_target_norm_y),
          sqrt_information_(sqrt_information), weight_(weight) {}

    template <typename T>
    bool operator()(const T* const yaw, const T* t_param, T* residuals) const
    {
        const Eigen::Matrix<T, 2, 1> t(t_param[0], t_param[1]);
        const Eigen::Matrix<T, 2, 2> R = ceres::slam2d::RotationMatrix2D(*yaw);
        const Eigen::Vector2d obs_src_t(obs_src_x_, obs_src_y_);
        const Eigen::Vector2d obs_target_t(obs_target_x_, obs_target_y_);
        const Eigen::Vector2d obs_target_norm(obs_target_norm_x_, obs_target_norm_y_);

        // Project the point-to-point error onto the target normal: this is the
        // perpendicular distance from the transformed source point to the line.
        residuals[0] = (R * obs_src_t.cast<T>() + t - obs_target_t.cast<T>())
                           .dot(obs_target_norm.cast<T>());

        // Weight this constraint.
        residuals[0] = residuals[0] * weight_;

        // Scale the residual by the square-root information to account for
        // the measurement uncertainty.
        residuals[0] = sqrt_information_(0, 0) * residuals[0];
        return true;
    }

    static ceres::CostFunction* Create(double obs_src_x, double obs_src_y,
                                       double obs_target_x, double obs_target_y,
                                       double obs_target_norm_x, double obs_target_norm_y,
                                       Eigen::Matrix<double, 1, 1> sqrt_information,
                                       double weight = 1.0)
    {
        // 1 residual, 1 yaw parameter, 2 translation parameters.
        return new ceres::AutoDiffCostFunction<PL2dErrorTerm, 1, 1, 2>(
            new PL2dErrorTerm(obs_src_x, obs_src_y, obs_target_x, obs_target_y,
                              obs_target_norm_x, obs_target_norm_y,
                              sqrt_information, weight));
    }

    double obs_src_x_, obs_src_y_;
    double obs_target_x_, obs_target_y_;
    double obs_target_norm_x_, obs_target_norm_y_;
    Eigen::Matrix<double, 1, 1> sqrt_information_;
    double weight_;
};
```
The third is the Euclidean distance plus the included angle of vector points. In practical applications this simplifies each detection with orientation (e.g. a parking spot) into a two-dimensional point with a direction, enabling more accurate and faster data association.
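The residual for this form stacks the position error and the normalized angle error:

$$
r = \Lambda^{1/2}\, w
\begin{pmatrix}
R(\theta)\, p^{\mathrm{src}} + t - q^{\mathrm{tgt}} \\
\mathrm{normalize}\!\left(\theta + \theta^{\mathrm{src}} - \theta^{\mathrm{tgt}}\right)
\end{pmatrix},
$$

where $\Lambda^{1/2}$ is now a 3×3 square-root information matrix.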
```cpp
// Point-to-point error term in vector (directed point) form.
struct Vector2dErrorTerm
{
public:
    Vector2dErrorTerm(double obs_src_x, double obs_src_y, double obs_src_yaw,
                      double obs_target_x, double obs_target_y, double obs_target_yaw,
                      Eigen::Matrix3d sqrt_information, double weight = 1.0)
        : obs_src_x_(obs_src_x), obs_src_y_(obs_src_y), obs_src_yaw_(obs_src_yaw),
          obs_target_x_(obs_target_x), obs_target_y_(obs_target_y),
          obs_target_yaw_(obs_target_yaw),
          sqrt_information_(sqrt_information), weight_(weight) {}

    template <typename T>
    bool operator()(const T* const yaw, const T* t_param, T* residuals) const
    {
        const Eigen::Matrix<T, 2, 1> t(t_param[0], t_param[1]);
        const Eigen::Matrix<T, 2, 2> R = ceres::slam2d::RotationMatrix2D(*yaw);
        const Eigen::Vector2d obs_src_t(obs_src_x_, obs_src_y_);
        const Eigen::Vector2d obs_target_t(obs_target_x_, obs_target_y_);
        const Eigen::Matrix<T, 2, 2> obs_src_R =
            ceres::slam2d::RotationMatrix2D(static_cast<T>(obs_src_yaw_));

        Eigen::Map<Eigen::Matrix<T, 3, 1>> residuals_map(residuals);

        // Position part: transformed source point minus target point.
        residuals_map.template head<2>() =
            R * obs_src_t.cast<T>() + t - obs_target_t.cast<T>();

        // Orientation part: difference between the rotated source direction
        // and the target direction, normalized to [-pi, pi).
        const Eigen::Matrix<T, 2, 2> expected_target_R = R * obs_src_R;
        residuals_map(2) = ceres::slam2d::NormalizeAngle(
            ceres::slam2d::RotationMatrixToYaw(expected_target_R) -
            static_cast<T>(obs_target_yaw_));

        // Weight these constraints.
        residuals_map = residuals_map * weight_;

        // Scale the residuals by the square-root information matrix to account
        // for the measurement uncertainty.
        residuals_map = sqrt_information_.template cast<T>() * residuals_map;
        return true;
    }

    static ceres::CostFunction* Create(double obs_src_x, double obs_src_y, double obs_src_yaw,
                                       double obs_target_x, double obs_target_y,
                                       double obs_target_yaw,
                                       Eigen::Matrix3d sqrt_information, double weight = 1.0)
    {
        // 3 residuals, 1 yaw parameter, 2 translation parameters.
        return new ceres::AutoDiffCostFunction<Vector2dErrorTerm, 3, 1, 2>(
            new Vector2dErrorTerm(obs_src_x, obs_src_y, obs_src_yaw,
                                  obs_target_x, obs_target_y, obs_target_yaw,
                                  sqrt_information, weight));
    }

    double obs_src_x_, obs_src_y_, obs_src_yaw_;
    double obs_target_x_, obs_target_y_, obs_target_yaw_;
    Eigen::Matrix3d sqrt_information_;
    double weight_;
};
```
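For completeness, since the `Align()` function above only adds the point-to-point and point-to-line terms: a directed detection could be wired into the same residual-adding loop. This is a hypothetical sketch; the `type == 3` code, the `yaw` fields on `CloudPoint`, and the identity information matrix are my own assumptions, not part of the original pipeline.

```cpp
else if (src_pt.type == 3)  // hypothetical: directed detection (e.g. a parking spot with heading)
{
    // Vector-point constraint: position plus heading, 3 residuals.
    Eigen::Matrix3d sqrt_information = Eigen::Matrix3d::Identity();  // assumed covariance
    double weight = 1.0;
    ceres::CostFunction* cost_function = ceres::slam2d::Vector2dErrorTerm::Create(
        src_pt.x, src_pt.y, src_pt.yaw,           // src_pt.yaw is assumed to exist
        target_pt.x, target_pt.y, target_pt.yaw,  // target_pt.yaw likewise
        sqrt_information, weight);
    problem.AddResidualBlock(cost_function, loss_function, euler_array, t_array);
}
```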