Camera Calibration Series
The previous two posts in this series focused on theory: the pinhole camera model, the lens distortion model, and the principles of Zhang's calibration method. This post walks through the implementation. Many mature vision frameworks (OpenCV, MATLAB, HALCON, ROS) already ship camera calibration, but to really connect the code with the theory it is still worth implementing it once yourself.
The data are the left and right calibration images provided with OpenCV's sample data; I use the left set, 13 images in total. The board has 9x6 inner corners (boardSize) and each square (squareSize) is 25x25 mm.
Libraries used:
OpenCV (image I/O and processing)
Eigen3 (matrix library)
Ceres (nonlinear optimization)
Algorithm steps:
- Extract the chessboard corners from each image → imagePoints
- Generate the corresponding world coordinates of the corners (z = 0) → objectPoints
- Compute the homography H between objectPoints and imagePoints for each view, giving 13 H matrices in total
- Decompose the H matrices to build the system Vb = 0, solve for the B matrix and recover the intrinsic matrix K (the relevant equations are recapped right after this list)
- Using K, compute the extrinsics R and t of each board view
- Build a nonlinear optimization that also includes the distortion model, and refine K, the extrinsics, and the distortion coefficients k1, k2, k3, p1, p2.
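As a quick recap of the theory from the earlier posts (this is standard Zhang's method, nothing new): each view of the planar board yields a homography that factors through the intrinsics and the extrinsics, and the orthonormality of the first two rotation columns gives two linear constraints per view on the matrix B:

\[
H = [h_1 \; h_2 \; h_3] = \lambda K [r_1 \; r_2 \; t], \qquad B = K^{-\top} K^{-1}
\]
\[
h_1^\top B h_2 = 0, \qquad h_1^\top B h_1 = h_2^\top B h_2
\]

Writing the symmetric B as the 6-vector b = (B11, B12, B22, B13, B23, B33)^T and stacking the two constraints from every view gives the homogeneous system Vb = 0 solved in step 4; with 13 views V is 26x6, and at least 3 views are needed for a solution that is unique up to scale.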
1. Extract the chessboard corners from the images
// Prepare the data: 13 images
std::vector<std::string> files = {
"../data/images/left01.jpg",
"../data/images/left02.jpg",
"../data/images/left03.jpg",
"../data/images/left04.jpg",
"../data/images/left05.jpg",
"../data/images/left06.jpg",
"../data/images/left07.jpg",
"../data/images/left08.jpg",
"../data/images/left09.jpg",
"../data/images/left11.jpg",
"../data/images/left12.jpg",
"../data/images/left13.jpg",
"../data/images/left14.jpg",
};
// Extract the chessboard corners
std::vector<std::vector<Eigen::Vector2d>> imagePoints;
std::vector<std::vector<Eigen::Vector3d>> objectPoints;
cv::Size boardSize(9, 6);        // number of inner corners per row / column
cv::Size2f squareSize(25., 25.); // square size in mm
for(int i=0; i<files.size(); ++i) {
    cv::Mat img = cv::imread(files[i]);
    std::vector<cv::Point2f> corners;
    bool ok = cv::findChessboardCorners(img, boardSize, corners, cv::CALIB_CB_ADAPTIVE_THRESH | cv::CALIB_CB_FAST_CHECK | cv::CALIB_CB_NORMALIZE_IMAGE);
    if(ok) {
        // refine the detected corners to sub-pixel accuracy
        cv::Mat gray;
        cv::cvtColor(img, gray, cv::COLOR_BGR2GRAY);
        cv::cornerSubPix(gray, corners, cv::Size(11, 11), cv::Size(-1, -1), cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::MAX_ITER, 30, 0.001));
        cv::drawChessboardCorners(img, boardSize, cv::Mat(corners), ok);
        cv::imshow("corners", img);
        cv::waitKey(100);
        std::vector<Eigen::Vector2d> _corners;
        for(auto& pt: corners){
            _corners.push_back(Eigen::Vector2d(pt.x, pt.y));
        }
        imagePoints.push_back(_corners);
    } // views where detection fails are simply skipped
}
2. Set the world coordinates of the chessboard corners
// Set the world coordinates of the board corners (one set per detected view)
for(int i=0; i<imagePoints.size(); ++i){
    std::vector<Eigen::Vector3d> corners;
    getObjectPoints(boardSize, squareSize, corners);
    objectPoints.push_back(corners);
}
// Generate the planar board points row by row (z = 0), matching the corner
// ordering returned by cv::findChessboardCorners.
void getObjectPoints(const cv::Size& borderSize, const cv::Size2f& squareSize, std::vector<Eigen::Vector3d>& objectPoints) {
    for(int r=0; r<borderSize.height; ++r) {
        for(int c=0; c<borderSize.width; ++c) {
            objectPoints.push_back(Eigen::Vector3d(c*squareSize.width, r*squareSize.height, 0.));
        }
    }
}
3. Compute the homography between imagePoints and objectPoints
The DLT solution of the homography was covered in an earlier post, and that implementation is included in the repository; a minimal sketch is also given after the OpenCV version below. Here is the OpenCV-based solver:
// Needs #include <opencv2/core/eigen.hpp> for cv::cv2eigen.
bool findHomographyByOpenCV(std::vector<Eigen::Vector2d>& srcPoints, std::vector<Eigen::Vector2d>& dstPoints, Eigen::Matrix3d& H) {
    std::vector<cv::Point2f> objectPoints, imagePoints;
    for(int i=0; i<srcPoints.size(); ++i) {
        objectPoints.push_back(cv::Point2f(srcPoints[i](0), srcPoints[i](1)));
        imagePoints.push_back(cv::Point2f(dstPoints[i](0), dstPoints[i](1)));
    }
    cv::Mat hMat = cv::findHomography(objectPoints, imagePoints, cv::RANSAC);
    if(hMat.empty()) return false;
    cv::cv2eigen(hMat, H);
    return true;
}
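For reference, here is a minimal DLT sketch of my own (an illustration, not necessarily identical to the repository's implementation; for better conditioning the points should additionally be normalized, which is omitted here). Each correspondence contributes two rows to A, and h is the right singular vector of A belonging to the smallest singular value:

bool findHomographyByDLT(const std::vector<Eigen::Vector2d>& srcPoints, const std::vector<Eigen::Vector2d>& dstPoints, Eigen::Matrix3d& H) {
    const int n = static_cast<int>(srcPoints.size());
    if(n < 4 || dstPoints.size() != srcPoints.size()) return false;
    Eigen::MatrixXd A(2*n, 9);
    for(int i=0; i<n; ++i) {
        const double X = srcPoints[i](0), Y = srcPoints[i](1);
        const double u = dstPoints[i](0), v = dstPoints[i](1);
        A.row(2*i)   << -X, -Y, -1.,  0.,  0.,  0., u*X, u*Y, u;
        A.row(2*i+1) <<  0.,  0.,  0., -X, -Y, -1., v*X, v*Y, v;
    }
    // Ah = 0: take the right singular vector of the smallest singular value
    Eigen::JacobiSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeFullV);
    Eigen::VectorXd h = svd.matrixV().col(8);
    H << h(0), h(1), h(2),
         h(3), h(4), h(5),
         h(6), h(7), h(8);
    H /= H(2, 2); // fix the free scale so that H(2,2) = 1
    return true;
}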
4. Decompose H, build Vb = 0, compute the B matrix and recover the intrinsic matrix K
/**
* Vij=[hi1hj1 hi1hj2+hi2hj1 hi2hj2 hi3hj1+hi1hj3 hi3hj2+hi2hj3 hi3hj3]
* @param H
* @param i
* @param j
* @return
*/
VectorXd getVector(const Matrix3d& H, int i, int j)
{
    i -= 1;
    j -= 1;
    VectorXd v(6);
    v << H(0, i)*H(0, j), H(0, i)*H(1, j) + H(1, i)*H(0, j), H(1, i)*H(1, j), H(2, i)*H(0, j) + H(0, i)*H(2, j), H(2, i)*H(1, j) + H(1, i)*H(2, j), H(2, i)*H(2, j);
    return v;
}
/**
 *
 * Compute the initial camera intrinsics: solve Vb = 0 and recover the K matrix
 *
 */
Matrix3d solveInitCameraIntrinsic(std::vector<Matrix3d>& homos)
{
    int n = homos.size();
    // build the system Vb = 0
    MatrixXd V(2*n, 6);
    for(int i=0; i<n; ++i)
    {
        VectorXd v1 = getVector(homos[i], 1, 2);
        VectorXd v11 = getVector(homos[i], 1, 1);
        VectorXd v22 = getVector(homos[i], 2, 2);
        VectorXd v2 = v11 - v22;
        for(int j=0; j<6; ++j)
        {
            V(2*i, j) = v1(j);
            V(2*i+1, j) = v2(j);
        }
    }
    // SVD: b is the right singular vector of the smallest singular value
    JacobiSVD<MatrixXd> svdSolver(V, ComputeThinV);
    MatrixXd v = svdSolver.matrixV();
    MatrixXd b = v.rightCols(1);
    std::cout << "b = " << b << std::endl;
    // recover the intrinsics fx, fy, skew c, u0, v0 from B
    double B11 = b(0), B12 = b(1), B22 = b(2), B13 = b(3), B23 = b(4), B33 = b(5);
    double v0 = (B12*B13-B11*B23) / (B11*B22-B12*B12);
    double s = B33-(B13*B13+v0*(B12*B13-B11*B23)) / B11;
    double fx = sqrt(s/B11);
    double fy = sqrt(s*B11 / (B11*B22-B12*B12));
    double c = -B12*fx*fx*fy/s;
    double u0 = c*v0/fx - B13*fx*fx/s;
    Matrix3d K;
    K << fx, c, u0,
         0, fy, v0,
         0, 0, 1;
    return K;
}
5. Using K, compute the extrinsics R and t for each view
/**
 *
 * Compute the initial camera extrinsics for each view
 */
void solveInitCameraExtrinsic(std::vector<Matrix3d>& homos, Matrix3d& K, std::vector<Matrix3d>& RList, std::vector<Vector3d>& tList)
{
    int n = homos.size();
    Matrix3d kInv = K.inverse();
    for (int i=0; i<n; ++i)
    {
        Vector3d r0, r1, r2;
        r0 = kInv*homos[i].col(0);
        r1 = kInv*homos[i].col(1);
        double s0 = sqrt(r0.dot(r0));
        double s1 = sqrt(r1.dot(r1));
        r0 /= s0;
        r1 /= s1;
        r2 = r0.cross(r1);
        Vector3d t = kInv*homos[i].col(2) / s0;
        Matrix3d R;
        R.col(0) = r0;
        R.col(1) = r1;
        R.col(2) = r2;
        std::cout << "R " << R << std::endl;
        std::cout << "t " << t.transpose() << std::endl;
        RList.push_back(R);
        tList.push_back(t);
    }
}
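Because of noise, the matrix assembled from r0, r1, r2 is generally not exactly orthogonal. Zhang's paper suggests replacing it with the closest true rotation matrix (in the Frobenius norm), which can be obtained from an SVD. A small sketch of that step (my own illustration, not necessarily part of the repository code):

// Replace an approximate rotation Q with the nearest true rotation matrix:
// Q = U * S * V^T  =>  R = U * V^T, with a sign fix to keep det(R) = +1.
Matrix3d nearestRotation(const Matrix3d& Q)
{
    JacobiSVD<Matrix3d> svd(Q, ComputeFullU | ComputeFullV);
    Matrix3d R = svd.matrixU() * svd.matrixV().transpose();
    if (R.determinant() < 0)
    {
        Matrix3d D = Matrix3d::Identity();
        D(2, 2) = -1.0;
        R = svd.matrixU() * D * svd.matrixV().transpose();
    }
    return R;
}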
6. Build the optimization objective
Refine K, the distortion coefficients k1, k2, k3, p1, p2, and the extrinsics.
The K and the 13 sets of extrinsics R, t computed above serve as the initial values for the optimization.
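For each observed corner, the residual is the difference between the detected pixel and the projection of the corresponding board point through the current parameters. Using the same distortion convention as OpenCV (radial k1, k2, k3 and tangential p1, p2), which is exactly what the functor below implements, the projection of a board point P_w with detected pixel (u_obs, v_obs) is:

\[
\begin{aligned}
(X_c, Y_c, Z_c)^\top &= R\,P_w + t, \qquad x = X_c/Z_c, \quad y = Y_c/Z_c, \quad r^2 = x^2 + y^2 \\
x_d &= x\,(1 + k_1 r^2 + k_2 r^4 + k_3 r^6) + 2 p_1 x y + p_2 (r^2 + 2x^2) \\
y_d &= y\,(1 + k_1 r^2 + k_2 r^4 + k_3 r^6) + p_1 (r^2 + 2y^2) + 2 p_2 x y \\
u &= f_x x_d + c_x, \qquad v = f_y y_d + c_y
\end{aligned}
\]

The residual for one corner is (u - u_obs, v - v_obs).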
The optimization uses the Ceres framework; the first step is to write the cost functor PROJECT_COST:
// Requires <ceres/ceres.h> and <ceres/rotation.h> (for ceres::AngleAxisRotatePoint).
struct PROJECT_COST {
    Eigen::Vector3d objPt;
    Eigen::Vector2d imgPt;
    PROJECT_COST(Eigen::Vector3d& objPt, Eigen::Vector2d& imgPt):objPt(objPt), imgPt(imgPt)
    {}
    template<typename T>
    bool operator()(
            const T *const k,
            const T *const r,
            const T *const t,
            T* residuals) const
    {
        T pos3d[3] = {T(objPt(0)), T(objPt(1)), T(objPt(2))};
        T pos3d_proj[3];
        // rotate the board point into the camera frame
        ceres::AngleAxisRotatePoint(r, pos3d, pos3d_proj);
        // translate
        pos3d_proj[0] += t[0];
        pos3d_proj[1] += t[1];
        pos3d_proj[2] += t[2];
        T xp = pos3d_proj[0] / pos3d_proj[2];
        T yp = pos3d_proj[1] / pos3d_proj[2];
        const T& fx = k[0];
        const T& fy = k[1];
        const T& cx = k[2];
        const T& cy = k[3];
        const T& k1 = k[4];
        const T& k2 = k[5];
        const T& k3 = k[6];
        const T& p1 = k[7];
        const T& p2 = k[8];
        T r_2 = xp*xp + yp*yp;
        /*
        // radial distortion
        T xdis = xp*(T(1.) + k1*r_2 + k2*r_2*r_2 + k3*r_2*r_2*r_2);
        T ydis = yp*(T(1.) + k1*r_2 + k2*r_2*r_2 + k3*r_2*r_2*r_2);
        // tangential distortion
        xdis = xdis + T(2.)*p1*xp*yp + p2*(r_2 + T(2.)*xp*xp);
        ydis = ydis + p1*(r_2 + T(2.)*yp*yp) + T(2.)*p2*xp*yp;
        */
        // radial and tangential distortion in one step
        T xdis = xp*(T(1.) + k1*r_2 + k2*r_2*r_2 + k3*r_2*r_2*r_2) + T(2.)*p1*xp*yp + p2*(r_2 + T(2.)*xp*xp);
        T ydis = yp*(T(1.) + k1*r_2 + k2*r_2*r_2 + k3*r_2*r_2*r_2) + p1*(r_2 + T(2.)*yp*yp) + T(2.)*p2*xp*yp;
        // pixel coordinates
        T u = fx*xdis + cx;
        T v = fy*ydis + cy;
        residuals[0] = u - T(imgPt[0]);
        residuals[1] = v - T(imgPt[1]);
        return true;
    }
};
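The functor parameterizes the rotation as an angle-axis vector, so the rotation matrices from solveInitCameraExtrinsic have to be converted before they are handed to Ceres. A minimal sketch (my own illustration; rList below is assumed to be the converted per-view buffer used in the next snippet, and Eigen's default column-major storage matches what ceres::RotationMatrixToAngleAxis expects):

#include <ceres/rotation.h>

std::vector<Eigen::Vector3d> rList(RList.size());
for(size_t i=0; i<RList.size(); ++i) {
    // RList[i].data() is the column-major 3x3 rotation; write its angle-axis into rList[i]
    ceres::RotationMatrixToAngleAxis(RList[i].data(), rList[i].data());
}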
Usage:
// Run the optimization
{
    ceres::Problem problem;
    double k[9] = {K(0,0), K(1,1), K(0,2), K(1,2), 0., 0., 0., 0., 0.}; // initial intrinsics; distortion starts at zero
    for(int i=0; i<n; ++i) {
        for(int j=0; j<imagePoints[i].size(); ++j) {
            // Template arguments of AutoDiffCostFunction:
            // 2 -> number of residuals (x and y)
            // 9 -> intrinsic block: 4 intrinsics + 5 distortion coefficients
            // 3 -> per-view rotation as an angle-axis vector (converted from the rotation matrix beforehand)
            // 3 -> per-view translation vector
            ceres::CostFunction* costFunction = new ceres::AutoDiffCostFunction<PROJECT_COST, 2, 9, 3, 3>(
                    new PROJECT_COST(objectPoints[i][j], imagePoints[i][j]));
            problem.AddResidualBlock(costFunction,
                                     nullptr,
                                     k,
                                     rList[i].data(),
                                     tList[i].data());
        }
    }
    std::cout << " solve Options ..." << std::endl;
    ceres::Solver::Options options;
    options.minimizer_progress_to_stdout = true;
    //options.linear_solver_type = ceres::DENSE_SCHUR;
    //options.trust_region_strategy_type = ceres::TrustRegionStrategyType::LEVENBERG_MARQUARDT;
    //options.preconditioner_type = ceres::JACOBI;
    //options.sparse_linear_algebra_library_type = ceres::EIGEN_SPARSE;
    ceres::Solver::Summary summary;
    ceres::Solve(options, &problem, &summary);
    std::cout << summary.BriefReport() << std::endl;
    if (!summary.IsSolutionUsable())
    {
        std::cout << "Bundle Adjustment failed." << std::endl;
    }
    else
    {
        // Display statistics about the minimization
        std::cout << std::endl
                  << "Bundle Adjustment statistics (approximated RMSE):\n"
                  << " #views: " << n << "\n"
                  << " #residuals: " << summary.num_residuals << "\n"
                  << " #num_parameters: " << summary.num_parameters << "\n"
                  << " #num_parameter_blocks: " << summary.num_parameter_blocks << "\n"
                  << " Initial RMSE: " << std::sqrt(summary.initial_cost / summary.num_residuals) << "\n"
                  << " Final RMSE: " << std::sqrt(summary.final_cost / summary.num_residuals) << "\n"
                  << " Time (s): " << summary.total_time_in_seconds << "\n"
                  << std::endl;
        for(auto& a: k) std::cout << a << " ";
        // collect the optimized intrinsics and distortion coefficients
        // (distortion in OpenCV order: k1, k2, p1, p2, k3)
        Eigen::Matrix3d cameraMatrix_;
        cameraMatrix_ << k[0], 0.0, k[2], 0, k[1], k[3], 0, 0, 1;
        Eigen::VectorXd distCoeffs_(5);
        distCoeffs_ << k[4], k[5], k[7], k[8], k[6];
        // evaluate the reprojection error
        std::vector<double> reprojErrs;
        double totalAvgErr = computeReprojectionErrors(objectPoints, imagePoints, rList, tList, cameraMatrix_, distCoeffs_, reprojErrs);
        std::cout << " avg re projection error = " << totalAvgErr << std::endl;
        for (size_t i = 0; i < reprojErrs.size(); i++)
        {
            std::cout << i << " projection error = " << reprojErrs[i] << std::endl;
        }
        // convert back to cv::Mat (cameraMatrix / distCoeffs are declared outside this block)
        cv::eigen2cv(cameraMatrix_, cameraMatrix);
        cv::eigen2cv(distCoeffs_, distCoeffs);
    }
}
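The computeReprojectionErrors helper referenced above lives in the repository; here is a minimal sketch of what it does (the signature is my assumption based on the call site): project every board point with the optimized parameters, apply the same distortion model as the cost functor, and report the per-view and overall RMS pixel errors.

double computeReprojectionErrors(
        const std::vector<std::vector<Eigen::Vector3d>>& objectPoints,
        const std::vector<std::vector<Eigen::Vector2d>>& imagePoints,
        const std::vector<Eigen::Vector3d>& rList,   // per-view angle-axis rotations
        const std::vector<Eigen::Vector3d>& tList,   // per-view translations
        const Eigen::Matrix3d& K,
        const Eigen::VectorXd& dist,                 // k1, k2, p1, p2, k3
        std::vector<double>& perViewErrors) {
    double totalSq = 0.;
    size_t totalPts = 0;
    perViewErrors.resize(objectPoints.size());
    for(size_t i=0; i<objectPoints.size(); ++i) {
        double viewSq = 0.;
        for(size_t j=0; j<objectPoints[i].size(); ++j) {
            // transform the board point into the camera frame
            double q[3] = {objectPoints[i][j](0), objectPoints[i][j](1), objectPoints[i][j](2)};
            double p[3];
            ceres::AngleAxisRotatePoint(rList[i].data(), q, p);
            p[0] += tList[i](0); p[1] += tList[i](1); p[2] += tList[i](2);
            // distort and project, same model as PROJECT_COST
            double x = p[0]/p[2], y = p[1]/p[2];
            double r2 = x*x + y*y;
            double radial = 1. + dist(0)*r2 + dist(1)*r2*r2 + dist(4)*r2*r2*r2;
            double xd = x*radial + 2.*dist(2)*x*y + dist(3)*(r2 + 2.*x*x);
            double yd = y*radial + dist(2)*(r2 + 2.*y*y) + 2.*dist(3)*x*y;
            double u = K(0,0)*xd + K(0,2);
            double v = K(1,1)*yd + K(1,2);
            double du = u - imagePoints[i][j](0);
            double dv = v - imagePoints[i][j](1);
            viewSq += du*du + dv*dv;
        }
        perViewErrors[i] = std::sqrt(viewSq / objectPoints[i].size());
        totalSq += viewSq;
        totalPts += objectPoints[i].size();
    }
    return std::sqrt(totalSq / totalPts);
}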
Final results (the first line is the optimized parameter vector k: fx, fy, cx, cy, k1, k2, k3, p1, p2):
536.073 536.016 342.371 235.536 -0.265091 -0.0467182 0.252215 0.00183296 -0.000314464
avg re projection error = 0.234593
0 projection error = 0.16992
1 projection error = 0.846329
2 projection error = 0.159117
3 projection error = 0.176626
4 projection error = 0.141207
5 projection error = 0.162312
6 projection error = 0.18801
7 projection error = 0.214098
8 projection error = 0.22217
9 projection error = 0.153192
10 projection error = 0.177543
11 projection error = 0.28586
12 projection error = 0.15332
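As a sanity check (my addition, not part of the original post), the same corner detections can be fed to OpenCV's built-in cv::calibrateCamera and the numbers compared. The OpenCV left sample images are assumed to be 640x480; adjust the size if you use different data:

// Cross-check against OpenCV's own calibration on the same detections.
std::vector<std::vector<cv::Point3f>> cvObjectPoints;
std::vector<std::vector<cv::Point2f>> cvImagePoints;
for(size_t i=0; i<imagePoints.size(); ++i) {
    std::vector<cv::Point3f> obj;
    std::vector<cv::Point2f> img;
    for(size_t j=0; j<imagePoints[i].size(); ++j) {
        obj.push_back(cv::Point3f((float)objectPoints[i][j](0), (float)objectPoints[i][j](1), 0.f));
        img.push_back(cv::Point2f((float)imagePoints[i][j](0), (float)imagePoints[i][j](1)));
    }
    cvObjectPoints.push_back(obj);
    cvImagePoints.push_back(img);
}
cv::Mat cvK, cvDist;
std::vector<cv::Mat> rvecs, tvecs;
double rms = cv::calibrateCamera(cvObjectPoints, cvImagePoints, cv::Size(640, 480), cvK, cvDist, rvecs, tvecs);
std::cout << "OpenCV RMS = " << rms << "\nK = " << cvK << "\ndist = " << cvDist << std::endl;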
That covers the core code and workflow. Undistorting images with the estimated coefficients is also included in the code, and the repository additionally contains fisheye lens calibration.
Source code:
CameraCalibration
https://github.com/zhaitianyong/CameraCalibration