The ideal camera model is the pinhole model, but lens manufacturing and lens mounting inevitably introduce distortion. Distortion is usually divided into radial distortion, tangential distortion, and thin-prism distortion.
Radial distortion is caused mainly by the lens itself: rays passing far from the lens center are bent more strongly than rays passing near the center. Figure 1 shows how a rectangular grid is displaced by radial distortion; viewed from the front, the farther a grid point lies from the optical center, the larger its displacement.
Figure 1. Radial distortion of a lens; the arrows show the displacement of the outer rectangular grid in the radially distorted image.
For details, see Learning OpenCV, page 412.
The mathematical model is as follows, where (xd, yd) is the distorted point, (xu, yu) is the undistorted point, and (k1, k2, k3, k4, k5, k6) are the radial distortion parameters; this is OpenCV's radial distortion model:
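The original formula image is not reproduced here; written out, the standard OpenCV rational radial model implied by the parameters above is, with (xu, yu) in normalized image coordinates and r² = xu² + yu²:

    xd = xu · (1 + k1·r² + k2·r⁴ + k3·r⁶) / (1 + k4·r² + k5·r⁴ + k6·r⁶)
    yd = yu · (1 + k1·r² + k2·r⁴ + k3·r⁶) / (1 + k4·r² + k5·r⁴ + k6·r⁶)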
Figure 2 shows the two typical kinds of radial distortion: barrel distortion and pincushion distortion.
Tangential distortion is caused mainly by lens mounting: it appears when the lens is not exactly parallel to the image plane. The figure below shows the tangential distortion of a particular lens.
The mathematical model is as follows, where (xd, yd) is the distorted point, (xu, yu) is the undistorted point, and (p1, p2) are the tangential distortion parameters.
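Written out, the standard OpenCV tangential (decentering) term, with r² = xu² + yu² as above, is:

    xd = xu + 2·p1·xu·yu + p2·(r² + 2·xu²)
    yd = yu + p1·(r² + 2·yu²) + 2·p2·xu·yu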
Thin-prism distortion is generally caused by lens design, manufacturing, and mounting errors, and can usually be ignored. Its mathematical model is written with (s1, s2) as the thin-prism distortion parameters.
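As a minimal sketch, assuming the classical two-parameter thin-prism term (the original formula image is not reproduced here):

    xd = xu + s1·r²
    yd = yu + s2·r²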
Since thin-prism distortion can usually be neglected, the distortion model in OpenCV 2.x considers only radial and tangential distortion, with eight distortion parameters (k1, k2, p1, p2, k3, k4, k5, k6). The complete mathematical model is given below (p3 is an extension of the tangential term; here it can simply be set to 0).
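Written out with p3 = 0, combining the rational radial term and the tangential term above (r² = xu² + yu²):

    xd = xu·(1 + k1·r² + k2·r⁴ + k3·r⁶)/(1 + k4·r² + k5·r⁴ + k6·r⁶) + 2·p1·xu·yu + p2·(r² + 2·xu²)
    yd = yu·(1 + k1·r² + k2·r⁴ + k3·r⁶)/(1 + k4·r² + k5·r⁴ + k6·r⁶) + p1·(r² + 2·yu²) + 2·p2·xu·yu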
OpenCV provides the function undistortPoints to undo distortion for image points. It computes the undistorted point position by iterative compensation and trades some accuracy for speed; for higher accuracy you can increase the number of iterations or solve the inverse with a Levenberg-Marquardt (LM) optimization.
To use undistortPoints only to compute the undistorted point position, without any rectification/alignment, pass R as Mat() and P as the intrinsic matrix cameraMatrix:
void undistortSinglePoint(cv::Point2f pt_in, cv::Point2f &pt_out,
                          cv::Mat mat_intrinsics1, cv::Mat mat_distcoeff1)
{
    // Pack the single input point into a 1x1 two-channel matrix.
    cv::Mat matInputPoint = cv::Mat::zeros(1, 1, CV_32FC2);
    matInputPoint.at<cv::Vec2f>(0, 0)[0] = pt_in.x;
    matInputPoint.at<cv::Vec2f>(0, 0)[1] = pt_in.y;

    // R = Mat() (no rectification), P = cameraMatrix so the result stays in pixel coordinates.
    undistortPoints(matInputPoint, matInputPoint, mat_intrinsics1, mat_distcoeff1,
                    cv::Mat(), mat_intrinsics1);

    pt_out.x = matInputPoint.at<cv::Vec2f>(0, 0)[0];
    pt_out.y = matInputPoint.at<cv::Vec2f>(0, 0)[1];
}

To raise the accuracy, look up the source code of undistortPoints and tune the iteration count iters. The version below copies the OpenCV implementation with iters increased:
void cvUndistortPoints2(const CvMat* _src, CvMat* _dst, const CvMat* _cameraMatrix,
                        const CvMat* _distCoeffs, const CvMat* matR, const CvMat* matP)
{
    double A[3][3], RR[3][3], k[8] = { 0, 0, 0, 0, 0, 0, 0, 0 }, fx, fy, ifx, ify, cx, cy;
    CvMat matA = cvMat(3, 3, CV_64F, A), _Dk;
    CvMat _RR = cvMat(3, 3, CV_64F, RR);
    const CvPoint2D32f* srcf;
    const CvPoint2D64f* srcd;
    CvPoint2D32f* dstf;
    CvPoint2D64f* dstd;
    int stype, dtype;
    int sstep, dstep;
    int i, j, n, iters = 1;

    CV_Assert(CV_IS_MAT(_src) && CV_IS_MAT(_dst) &&
        (_src->rows == 1 || _src->cols == 1) &&
        (_dst->rows == 1 || _dst->cols == 1) &&
        _src->cols + _src->rows - 1 == _dst->rows + _dst->cols - 1 &&
        (CV_MAT_TYPE(_src->type) == CV_32FC2 || CV_MAT_TYPE(_src->type) == CV_64FC2) &&
        (CV_MAT_TYPE(_dst->type) == CV_32FC2 || CV_MAT_TYPE(_dst->type) == CV_64FC2));

    CV_Assert(CV_IS_MAT(_cameraMatrix) &&
        _cameraMatrix->rows == 3 && _cameraMatrix->cols == 3);

    cvConvert(_cameraMatrix, &matA);

    if (_distCoeffs)
    {
        CV_Assert(CV_IS_MAT(_distCoeffs) &&
            (_distCoeffs->rows == 1 || _distCoeffs->cols == 1) &&
            (_distCoeffs->rows*_distCoeffs->cols == 4 ||
             _distCoeffs->rows*_distCoeffs->cols == 5 ||
             _distCoeffs->rows*_distCoeffs->cols == 8));

        _Dk = cvMat(_distCoeffs->rows, _distCoeffs->cols,
                    CV_MAKETYPE(CV_64F, CV_MAT_CN(_distCoeffs->type)), k);

        cvConvert(_distCoeffs, &_Dk);
        iters = 60;  // raised from the default for higher accuracy
    }

    if (matR)
    {
        CV_Assert(CV_IS_MAT(matR) && matR->rows == 3 && matR->cols == 3);
        cvConvert(matR, &_RR);
    }
    else
        cvSetIdentity(&_RR);

    if (matP)
    {
        double PP[3][3];
        CvMat _P3x3, _PP = cvMat(3, 3, CV_64F, PP);
        CV_Assert(CV_IS_MAT(matP) && matP->rows == 3 && (matP->cols == 3 || matP->cols == 4));
        cvConvert(cvGetCols(matP, &_P3x3, 0, 3), &_PP);
        cvMatMul(&_PP, &_RR, &_RR);
    }

    srcf = (const CvPoint2D32f*)_src->data.ptr;
    srcd = (const CvPoint2D64f*)_src->data.ptr;
    dstf = (CvPoint2D32f*)_dst->data.ptr;
    dstd = (CvPoint2D64f*)_dst->data.ptr;
    stype = CV_MAT_TYPE(_src->type);
    dtype = CV_MAT_TYPE(_dst->type);
    sstep = _src->rows == 1 ? 1 : _src->step / CV_ELEM_SIZE(stype);
    dstep = _dst->rows == 1 ? 1 : _dst->step / CV_ELEM_SIZE(dtype);

    n = _src->rows + _src->cols - 1;

    fx = A[0][0];
    fy = A[1][1];
    ifx = 1. / fx;
    ify = 1. / fy;
    cx = A[0][2];
    cy = A[1][2];

    for (i = 0; i < n; i++)
    {
        double x, y, x0, y0;
        if (stype == CV_32FC2)
        {
            x = srcf[i*sstep].x;
            y = srcf[i*sstep].y;
        }
        else
        {
            x = srcd[i*sstep].x;
            y = srcd[i*sstep].y;
        }

        // Convert to normalized image coordinates.
        x0 = x = (x - cx)*ifx;
        y0 = y = (y - cy)*ify;

        // Compensate distortion iteratively.
        for (j = 0; j < iters; j++)
        {
            double r2 = x*x + y*y;
            double icdist = (1 + ((k[7] * r2 + k[6])*r2 + k[5])*r2) /
                            (1 + ((k[4] * r2 + k[1])*r2 + k[0])*r2);
            double deltaX = 2 * k[2] * x*y + k[3] * (r2 + 2 * x*x);
            double deltaY = k[2] * (r2 + 2 * y*y) + 2 * k[3] * x*y;
            x = (x0 - deltaX)*icdist;
            y = (y0 - deltaY)*icdist;
        }

        // Apply R (and P, if given) to the undistorted normalized point.
        double xx = RR[0][0] * x + RR[0][1] * y + RR[0][2];
        double yy = RR[1][0] * x + RR[1][1] * y + RR[1][2];
        double ww = 1. / (RR[2][0] * x + RR[2][1] * y + RR[2][2]);
        x = xx*ww;
        y = yy*ww;

        if (dtype == CV_32FC2)
        {
            dstf[i*dstep].x = (float)x;
            dstf[i*dstep].y = (float)y;
        }
        else
        {
            dstd[i*dstep].x = x;
            dstd[i*dstep].y = y;
        }
    }
}

void undistortPoints2(cv::Mat src, cv::Mat &dst, cv::Mat cameraMatrix,
                      cv::Mat distCoeffs, cv::Mat R, cv::Mat P)
{
    CV_Assert(src.isContinuous() && (src.depth() == CV_32F || src.depth() == CV_64F) &&
        ((src.rows == 1 && src.channels() == 2) || src.cols*src.channels() == 2));

    dst = cv::Mat(src.size(), src.type());

    // C-API headers sharing the cv::Mat buffers (OpenCV 2.x).
    CvMat _csrc = src, _cdst = dst, _ccameraMatrix = cameraMatrix;
    CvMat matR, matP, _cdistCoeffs, *pR = 0, *pP = 0, *pD = 0;
    if (!R.empty())
        pR = &(matR = R);
    if (!P.empty())
        pP = &(matP = P);
    if (!distCoeffs.empty())
        pD = &(_cdistCoeffs = distCoeffs);

    // _cdst shares dst's buffer, so the result is written directly into dst.
    cvUndistortPoints2(&_csrc, &_cdst, &_ccameraMatrix, pD, pR, pP);
}

Conversely, to obtain the distorted point from an undistorted point through the distortion model, use distortSinglePoint:
void distortSinglePoint(cv::Point2f pt_in, cv::Point2f &pt_out,
                        cv::Mat mat_intrinsics, cv::Mat mat_dist_coeff)
{
    // Distortion coefficients in OpenCV order: (k1, k2, p1, p2, k3, k4, k5, k6).
    float k1, k2, p1, p2;
    float fx, fy, cx, cy;
    k1 = mat_dist_coeff.at<double>(0, 0);
    k2 = mat_dist_coeff.at<double>(1, 0);
    p1 = mat_dist_coeff.at<double>(2, 0);
    p2 = mat_dist_coeff.at<double>(3, 0);
    fx = mat_intrinsics.at<double>(0, 0);
    fy = mat_intrinsics.at<double>(1, 1);
    cx = mat_intrinsics.at<double>(0, 2);
    cy = mat_intrinsics.at<double>(1, 2);

    float k3, k4, k5, k6;
    k3 = mat_dist_coeff.at<double>(4, 0);
    k4 = mat_dist_coeff.at<double>(5, 0);
    k5 = mat_dist_coeff.at<double>(6, 0);
    k6 = mat_dist_coeff.at<double>(7, 0);

    // Convert the undistorted pixel coordinates to normalized image coordinates.
    float xd = pt_in.x;
    float yd = pt_in.y;
    xd = (xd - cx) / fx;
    yd = (yd - cy) / fy;

    float xp = 0.0f;
    float yp = 0.0f;
    float r2, r4, r6;
    r2 = xd*xd + yd*yd;
    r4 = r2*r2;
    r6 = r4*r2;

    // Apply the rational radial term and the tangential term.
    float dev = 1 + k4*r2 + k5*r4 + k6*r6;
    xp = (1 + k1*r2 + k2*r4 + k3*r6)*xd / dev + 2 * p1*xd*yd + p2*(r2 + 2 * xd*xd);
    yp = (1 + k1*r2 + k2*r4 + k3*r6)*yd / dev + p1*(r2 + 2 * yd*yd) + 2 * p2*xd*yd;

    // Convert back to pixel coordinates.
    xp = xp*fx + cx;
    yp = yp*fy + cy;
    pt_out.x = xp;
    pt_out.y = yp;
}
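As a quick sanity check, a point can be distorted with distortSinglePoint and then recovered with undistortSinglePoint. The sketch below is only an illustration; the intrinsics and distortion coefficients are made-up placeholder values:

#include <opencv2/opencv.hpp>
#include <iostream>

int main()
{
    // Hypothetical intrinsics and 8-element distortion vector, for illustration only.
    cv::Mat K = (cv::Mat_<double>(3, 3) << 800, 0, 320,
                                           0, 800, 240,
                                           0, 0, 1);
    cv::Mat D = (cv::Mat_<double>(8, 1) << -0.3, 0.1, 0.001, 0.001, 0.0, 0.0, 0.0, 0.0);

    cv::Point2f p_undist(400.0f, 300.0f), p_dist, p_back;
    distortSinglePoint(p_undist, p_dist, K, D);    // forward model: add distortion
    undistortSinglePoint(p_dist, p_back, K, D);    // inverse: iterative compensation
    std::cout << p_undist << " -> " << p_dist << " -> " << p_back << std::endl;
    return 0;
}

With a reasonable number of iterations, p_back should agree with p_undist to within a small fraction of a pixel.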