ICP方法主要解决空间点云3D-3D的运动估计问题,
已知:$t_1$ 和 $t_2$ 时刻感知到的两个点云信息(基于传感器坐标系),以及 $t_1$ 时刻传感器位于全局坐标系下的位姿 $p_1$。
求解:传感器 $t_2$ 时刻相对 $t_1$ 时刻的位姿变化。
通常,在点云配准过程中,将 $t_2$ 时刻的感知点云称为 source,将 $t_1$ 时刻的感知点云称为 target。
解决ICP问题的基本步骤如下:
- 确定 $t_1$、$t_2$ 时刻之间的传感器位姿变化初值 $R_0$、$t_0$,实车案例中取 IMU 与轮速计融合计算的 DR(航迹推算)值。
- 将 $t_2$ 时刻的点云数据,即 source 点云,利用位姿变化初值坐标转换至 $t_1$ 时刻 target 点云的坐标系下。
- 将 target 点云构建为 KDtree,坐标转换后的 source 点云在 $\text{KDtree}_{target}$ 中找到最近点,完成数据关联。
- 构建代价函数,进行 R、t 优化。代价函数有多种形式:点对点的欧氏距离、点对线的垂直距离,以及矢量的欧氏距离和夹角;最后一种形式在利用库位检测信息时将每个库位简化为一个矢量点进行优化。
- 迭代优化。将第 4 步优化后的 R、t 作为第 1 步的位姿变换初值,重新执行第 2、3、4 步,直到前后两次迭代的 R、t 变化量满足收敛要求。
利用KDtree解决数据关联问题,在另一个博客中已经介绍
KDtree相关库nanoflann的应用
以下代码为利用ceres解决优化问题的示例:
// Align pSourceScan (t2, source cloud) against pTargetScan (t1, target
// cloud) with iterative ICP: repeat (data association -> ceres 2D
// least-squares) until the relative pose update converges, then report
// the refined world pose and the relative source->target transform.
//
// NOTE(review): the published snippet was garbled by HTML rendering —
// text between '<' and '>' characters was swallowed (the association
// loop header, the per-pair point fetch, Eigen template arguments, the
// trailing debug print and closing brace). Those spans are reconstructed
// below and marked; confirm them against the original repository.
//
// @param pSourceScan scan at t2; its predicted pose seeds the initial guess.
// @param pTargetScan scan at t1; its corrected pose anchors the target frame.
// @param rBestpose   out: optimized world pose (x, y, yaw) of the source scan.
// @param rDelta      out: (dx, dy, dyaw) of source relative to target frame.
void ScanAlign::Align(LocalizedRangeScan* pSourceScan, LocalizedRangeScan* pTargetScan,
                      Pose2 &rBestpose, Eigen::Vector3d& rDelta)
{
    SetSource(pSourceScan);
    SetTarget(pTargetScan);
    // World pose of the source scan (planar problem: yaw only, z = 0).
    Eigen::Vector3d ypr_w_src(pSourceScan->GetPredictPose().GetYaw_Rad(), 0, 0);
    Eigen::Vector3d t_w_src(pSourceScan->GetPredictPose().GetX(),
                            pSourceScan->GetPredictPose().GetY(),
                            0);
    common::PoseTransformation pose_w_src(ypr_w_src, t_w_src);
    // World pose of the target scan.
    Eigen::Vector3d ypr_w_target(pTargetScan->GetCorrectedPose().GetYaw_Rad(), 0, 0);
    Eigen::Vector3d t_w_target(pTargetScan->GetCorrectedPose().GetX(),
                               pTargetScan->GetCorrectedPose().GetY(),
                               0);
    common::PoseTransformation pose_w_target(ypr_w_target, t_w_target);
    // Initial guess: source frame expressed in the target frame (raw state).
    current_transform_ = pose_w_src.GetTransform(pose_w_target);
    last_transform_ = current_transform_;

    for (size_t opti_counter = 0; opti_counter < 10; ++opti_counter)
    {
        // Data association: with both clouds in the target frame, find the
        // nearest target point for every source point (KDtree lookup).
        Association();

        // Build the least-squares problem.
        ceres::Problem problem;
        ceres::Solver::Options options;
        options.linear_solver_type = ceres::SPARSE_NORMAL_CHOLESKY;
        options.minimizer_progress_to_stdout = false;
        //options.max_solver_time_in_seconds = SOLVER_TIME * 3;
        options.max_num_iterations = 100;
        ceres::Solver::Summary summary;
        // Huber loss tames the influence of wrong associations (outliers).
        ceres::LossFunction *loss_function = new ceres::HuberLoss(0.1);
        // 2D over-parameterization: only yaw is optimized, so wrap the angle
        // with AngleLocalParameterization; a 3D problem using a quaternion
        // would use QuaternionParameterization instead.
        ceres::LocalParameterization* angle_local_parameterization =
            ceres::slam2d::AngleLocalParameterization::Create();
        // Parameter blocks: 1-dof yaw and 2-dof translation, optimized in place.
        problem.AddParameterBlock(current_transform_.ypr_.data(), 1, angle_local_parameterization);
        problem.AddParameterBlock(current_transform_.t_.data(), 2);
        double *t_array = current_transform_.t_.data();
        double *euler_array = current_transform_.ypr_.data();

        // NOTE(review): reconstructed loop — presumably iterates over the
        // associated pairs produced by Association(); verify the container
        // name and pair layout against the real code.
        for (size_t src_index = 0; src_index < associations_.size(); ++src_index)
        {
            const auto& src_pt = associations_[src_index].src;
            const auto& target_pt = associations_[src_index].target;
            if (src_pt.type == 1) // point feature: point-to-point residual
            {
                // Isotropic 2D covariance; sqrt information = chol(cov^-1).
                Eigen::DiagonalMatrix<double, 2> cov(0.1, 0.1);
                Eigen::Matrix2d sqrt_information = cov;
                sqrt_information = sqrt_information.inverse().llt().matrixL();
                double weight = 1.0;
                ceres::CostFunction* cost_function =
                    ceres::slam2d::PP2dErrorTerm::Create(src_pt.x, src_pt.y, target_pt.x, target_pt.y,
                                                         sqrt_information, weight);
                problem.AddResidualBlock(cost_function, loss_function, euler_array, t_array);
            }
            else if (src_pt.type == 2) // laneline: point-to-line residual
            {
                // 1x1 sqrt information for the scalar perpendicular distance.
                Eigen::Matrix<double, 1, 1> sqrt_information;
                sqrt_information(0, 0) = std::sqrt(1 / 0.1);
                double weight = 1.0;
                ceres::CostFunction* cost_function =
                    ceres::slam2d::PL2dErrorTerm::Create(src_pt.x, src_pt.y,
                                                         target_pt.x, target_pt.y,
                                                         target_pt.norm_x, target_pt.norm_y,
                                                         sqrt_information, weight);
                problem.AddResidualBlock(cost_function, loss_function, euler_array, t_array);
            }
        } // end add ResidualBlock

        // Solve; the optimizer writes the refined yaw/translation in place.
        ceres::Solve(options, &problem, &summary);
        // Refresh the cached transform representation from the updated
        // euler-angle/translation arrays.
        current_transform_.UpdateFromEulerAngle_Translation();
        // Converged when the pose barely moved between two iterations.
        common::PoseTransformation estimation_change = current_transform_.GetTransform(last_transform_);
        last_transform_ = current_transform_;
        if (estimation_change.t_.norm() < 1e-5 && estimation_change.ypr_.norm() < 1e-5) {
            break;
        }
    } // end iteration (association and optimization cycles)

    // Compose the refined relative transform back into a world pose and
    // report it through the output parameters.
    pose_w_src = pose_w_target * current_transform_;
    rBestpose.SetX(pose_w_src.t_[0]);
    rBestpose.SetY(pose_w_src.t_[1]);
    rBestpose.SetYaw_Rad(pose_w_src.ypr_[0]);
    rDelta[0] = current_transform_.t_[0];   // delta x
    rDelta[1] = current_transform_.t_[1];   // delta y
    rDelta[2] = current_transform_.ypr_[0]; // delta yaw
    // Debug print (truncated in the original post):
    // std::cout << "icp_target_src : x = " << rDelta[0] << std::endl;
}