undistortPoints not giving the exact inverse of distortion model (2017-05-17)

I was doing some tests with OpenCV's distortion model. Basically, what I did was implement the distortion equations and check whether cv::undistortPoints gives the inverse of those equations. I noticed that cv::undistortPoints does not give you the exact inverse of the distortion equations. When I saw this, I went to the implementation of cv::undistortPoints and realized that, in the final step where it inverts the distortion model, OpenCV always runs exactly 5 iterations (or actually 0 iterations if no distortion coefficients are given to the function) and never uses an error metric on the undistorted point to check whether it has been undistorted accurately. With this in mind, I copied the implementation and changed the termination condition of the iterative process to take an error metric into account, which gave me the exact inverse of the distortion model. The code demonstrating this is attached at the end of this post. My question is: does this happen because OpenCV prefers performance (spending a bit less time) over accuracy, or is it just a "bug"? (With the termination condition I propose, it obviously takes more time to undistort each point.)
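For reference, the forward model implemented in distortPoint() below is the standard OpenCV radial/tangential distortion model (with the rational radial term). In normalized camera coordinates (x, y), with r^2 = x^2 + y^2, an undistorted point maps to a distorted one as:

    x_d = x*(1 + k1*r^2 + k2*r^4 + k3*r^6)/(1 + k4*r^2 + k5*r^4 + k6*r^6) + 2*p1*x*y + p2*(r^2 + 2*x^2)
    y_d = y*(1 + k1*r^2 + k2*r^4 + k3*r^6)/(1 + k4*r^2 + k5*r^4 + k6*r^6) + p1*(r^2 + 2*y^2) + 2*p2*x*y

cv::undistortPoints inverts these equations by fixed-point iteration; the only thing changed in the copy below is the stopping rule of that iteration.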

Thank you!

#include <opencv2/core.hpp> 
#include <opencv2/imgproc.hpp> 
#include <iostream> 
#include <vector> 
#include <sstream> 
#include <stdexcept> 
#include <cmath> 

using namespace cv; 

// This is a copy of the opencv implementation 
void cvUndistortPoints_copy(const CvMat* _src, CvMat* _dst, const CvMat* _cameraMatrix, 
        const CvMat* _distCoeffs, 
        const CvMat* matR, const CvMat* matP) 
{ 
    double A[3][3], RR[3][3], k[8]={0,0,0,0,0,0,0,0}, fx, fy, ifx, ify, cx, cy; 
    CvMat matA=cvMat(3, 3, CV_64F, A), _Dk; 
    CvMat _RR=cvMat(3, 3, CV_64F, RR); 
    const CvPoint2D32f* srcf; 
    const CvPoint2D64f* srcd; 
    CvPoint2D32f* dstf; 
    CvPoint2D64f* dstd; 
    int stype, dtype; 
    int sstep, dstep; 
    int i, j, n, iters = 1; // iters is kept from the original implementation; the modified loop below uses max_iters and e instead 

    CV_Assert(CV_IS_MAT(_src) && CV_IS_MAT(_dst) && 
     (_src->rows == 1 || _src->cols == 1) && 
     (_dst->rows == 1 || _dst->cols == 1) && 
     _src->cols + _src->rows - 1 == _dst->rows + _dst->cols - 1 && 
     (CV_MAT_TYPE(_src->type) == CV_32FC2 || CV_MAT_TYPE(_src->type) == CV_64FC2) && 
     (CV_MAT_TYPE(_dst->type) == CV_32FC2 || CV_MAT_TYPE(_dst->type) == CV_64FC2)); 

    CV_Assert(CV_IS_MAT(_cameraMatrix) && 
     _cameraMatrix->rows == 3 && _cameraMatrix->cols == 3); 

    cvConvert(_cameraMatrix, &matA); 

    if(_distCoeffs) 
    { 
     CV_Assert(CV_IS_MAT(_distCoeffs) && 
      (_distCoeffs->rows == 1 || _distCoeffs->cols == 1) && 
      (_distCoeffs->rows*_distCoeffs->cols == 4 || 
      _distCoeffs->rows*_distCoeffs->cols == 5 || 
      _distCoeffs->rows*_distCoeffs->cols == 8)); 

     _Dk = cvMat(_distCoeffs->rows, _distCoeffs->cols, 
      CV_MAKETYPE(CV_64F,CV_MAT_CN(_distCoeffs->type)), k); 

     cvConvert(_distCoeffs, &_Dk); 
     iters = 5; // stock OpenCV: always exactly 5 iterations, with no convergence check 
    } 

    if(matR) 
    { 
     CV_Assert(CV_IS_MAT(matR) && matR->rows == 3 && matR->cols == 3); 
     cvConvert(matR, &_RR); 
    } 
    else 
     cvSetIdentity(&_RR); 

    if(matP) 
    { 
     double PP[3][3]; 
     CvMat _P3x3, _PP=cvMat(3, 3, CV_64F, PP); 
     CV_Assert(CV_IS_MAT(matP) && matP->rows == 3 && (matP->cols == 3 || matP->cols == 4)); 
     cvConvert(cvGetCols(matP, &_P3x3, 0, 3), &_PP); 
     cvMatMul(&_PP, &_RR, &_RR); 
    } 

    srcf = (const CvPoint2D32f*)_src->data.ptr; 
    srcd = (const CvPoint2D64f*)_src->data.ptr; 
    dstf = (CvPoint2D32f*)_dst->data.ptr; 
    dstd = (CvPoint2D64f*)_dst->data.ptr; 
    stype = CV_MAT_TYPE(_src->type); 
    dtype = CV_MAT_TYPE(_dst->type); 
    sstep = _src->rows == 1 ? 1 : _src->step/CV_ELEM_SIZE(stype); 
    dstep = _dst->rows == 1 ? 1 : _dst->step/CV_ELEM_SIZE(dtype); 

    n = _src->rows + _src->cols - 1; 

    fx = A[0][0]; 
    fy = A[1][1]; 
    ifx = 1./fx; 
    ify = 1./fy; 
    cx = A[0][2]; 
    cy = A[1][2]; 

    for(i = 0; i < n; i++) 
    { 
     double x, y, x0, y0; 
     if(stype == CV_32FC2) 
     { 
      x = srcf[i*sstep].x; 
      y = srcf[i*sstep].y; 
     } 
     else 
     { 
      x = srcd[i*sstep].x; 
      y = srcd[i*sstep].y; 
     } 

     x0 = x = (x - cx)*ifx; 
     y0 = y = (y - cy)*ify; 

     // compensate distortion iteratively 
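     // Modified termination condition: instead of OpenCV's fixed 5 iterations,
     // iterate until two consecutive estimates are identical in double
     // precision (the squared update e reaches exactly 0), with max_iters as a
     // safety cap.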
     int max_iters(500); 
     double e(1); 
     for(j = 0; j < max_iters && e>0; j++) 
     { 
      double r2 = x*x + y*y; 
      double icdist = (1 + ((k[7]*r2 + k[6])*r2 + k[5])*r2)/(1 + ((k[4]*r2 + k[1])*r2 + k[0])*r2); // reciprocal of the radial distortion factor at the current estimate 
      double deltaX = 2*k[2]*x*y + k[3]*(r2 + 2*x*x); 
      double deltaY = k[2]*(r2 + 2*y*y) + 2*k[3]*x*y; 
      double xant = x; 
      double yant = y; 
      x = (x0 - deltaX)*icdist; 
      y = (y0 - deltaY)*icdist; 
      e = pow(xant - x,2)+ pow(yant - y,2); 
     } 

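     // apply the rectification/projection transform (RR = P*R) and de-homogenize 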
     double xx = RR[0][0]*x + RR[0][1]*y + RR[0][2]; 
     double yy = RR[1][0]*x + RR[1][1]*y + RR[1][2]; 
     double ww = 1./(RR[2][0]*x + RR[2][1]*y + RR[2][2]); 
     x = xx*ww; 
     y = yy*ww; 

     if(dtype == CV_32FC2) 
     { 
      dstf[i*dstep].x = (float)x; 
      dstf[i*dstep].y = (float)y; 
     } 
     else 
     { 
      dstd[i*dstep].x = x; 
      dstd[i*dstep].y = y; 
     } 
    } 
} 
void undistortPoints_copy(InputArray _src, OutputArray _dst, 
          InputArray _cameraMatrix, 
          InputArray _distCoeffs, 
          InputArray _Rmat=noArray(), 
          InputArray _Pmat=noArray()) 
{ 
    Mat src = _src.getMat(), cameraMatrix = _cameraMatrix.getMat(); 
    Mat distCoeffs = _distCoeffs.getMat(), R = _Rmat.getMat(), P = _Pmat.getMat(); 

    CV_Assert(src.isContinuous() && (src.depth() == CV_32F || src.depth() == CV_64F) && 
       ((src.rows == 1 && src.channels() == 2) || src.cols*src.channels() == 2)); 

    _dst.create(src.size(), src.type(), -1, true); 
    Mat dst = _dst.getMat(); 

    CvMat _csrc = src, _cdst = dst, _ccameraMatrix = cameraMatrix; 
    CvMat matR, matP, _cdistCoeffs, *pR=0, *pP=0, *pD=0; 
    if(R.data) 
     pR = &(matR = R); 
    if(P.data) 
     pP = &(matP = P); 
    if(distCoeffs.data) 
     pD = &(_cdistCoeffs = distCoeffs); 
    cvUndistortPoints_copy(&_csrc, &_cdst, &_ccameraMatrix, pD, pR, pP); 
} 

// Distortion model implementation 
cv::Point2d distortPoint(cv::Point2d undistorted_point,cv::Mat camera_matrix, std::vector<double> distort_coefficients){ 

    // Check that camera matrix is double 
    if (!(camera_matrix.type() == CV_64F || camera_matrix.type() == CV_64FC1)){ 
     std::ostringstream oss; 
      oss<<"distortPoint(): Camera matrix type is wrong. It has to be a double matrix (CV_64F)"; 
     throw std::runtime_error(oss.str()); 
    } 

    // Create the output point in normalized (ideal) camera coordinates 
    cv::Point2d distortedPoint; 
    distortedPoint.x = (undistorted_point.x - camera_matrix.at<double>(0,2))/camera_matrix.at<double>(0,0); 
    distortedPoint.y = (undistorted_point.y - camera_matrix.at<double>(1,2))/camera_matrix.at<double>(1,1); 

    // Get model 
    if (distort_coefficients.size() < 4 || distort_coefficients.size() > 8){ 
      throw std::runtime_error("distortPoint(): Invalid number of distortion coefficients."); 
    } 
    double k1(distort_coefficients[0]); 
    double k2(distort_coefficients[1]); 
    double p1(distort_coefficients[2]);// first tangential distortion coefficient 
    double p2(distort_coefficients[3]);// second tangential distortion coefficient 
    double k3(0); 
    double k4(0); 
    double k5(0); 
    double k6(0); 
    if (distort_coefficients.size() > 4) 
     k3 = distort_coefficients[4]; 
    if (distort_coefficients.size() > 5) 
     k4 = distort_coefficients[5]; 
    if (distort_coefficients.size() > 6) 
     k5 = distort_coefficients[6]; 
    if (distort_coefficients.size() > 7) 
     k6 = distort_coefficients[7]; 

    // Distort 
    double xcx = distortedPoint.x; 
    double ycy = distortedPoint.y; 
    double r2 = pow(xcx, 2) + pow(ycy, 2); 
    double r4 = pow(r2,2); 
    double r6 = pow(r2,3); 
    double k = (1+k1*r2+k2*r4+k3*r6)/(1+k4*r2+k5*r4+k6*r6); // rational radial distortion factor 
    distortedPoint.x = xcx*k + 2*p1*xcx*ycy + p2*(r2+2*pow(xcx,2)); 
    distortedPoint.y = ycy*k + p1*(r2+2*pow(ycy,2)) + 2*p2*xcx*ycy; 
    distortedPoint.x = distortedPoint.x*camera_matrix.at<double>(0,0)+camera_matrix.at<double>(0,2); 
    distortedPoint.y = distortedPoint.y*camera_matrix.at<double>(1,1)+camera_matrix.at<double>(1,2); 

    // Exit 
    return distortedPoint; 
} 

int main(int argc, char** argv){ 

    // Camera matrix 
    double cam_mat_da[] = {1486.58092,0,1046.72507,0,1489.8659,545.374244,0,0,1}; 
    cv::Mat cam_mat(3,3,CV_64FC1,cam_mat_da); 

    // Distortion coefficients 
    double dist_coefs_da[] ={-0.13827409,0.29240721,-0.00088197,-0.00090189,0}; 
    std::vector<double> dist_coefs(dist_coefs_da,dist_coefs_da+5); 

    // Distorted Point 
    cv::Point2d p0(0,0); 
    std::vector<cv::Point2d> p0_v; 
    p0_v.push_back(p0); 

    // Undistort Point 
    std::vector<cv::Point2d> ud_p_v; 
    cv::undistortPoints(p0_v,ud_p_v,cam_mat,dist_coefs); 
    cv::Point2d ud_p = ud_p_v[0]; 
    ud_p.x = ud_p.x*cam_mat.at<double>(0,0)+cam_mat.at<double>(0,2); 
    ud_p.y = ud_p.y*cam_mat.at<double>(1,1)+cam_mat.at<double>(1,2); 

    // Redistort Point 
    cv::Point2d p = distortPoint(ud_p, cam_mat,dist_coefs); 

    // Undistort Point using own termination of iterative process 
    std::vector<cv::Point2d> ud_p_v_local; 
    undistortPoints_copy(p0_v,ud_p_v_local,cam_mat,dist_coefs); 
    cv::Point2d ud_p_local = ud_p_v_local[0]; 
    ud_p_local.x = ud_p_local.x*cam_mat.at<double>(0,0)+cam_mat.at<double>(0,2); 
    ud_p_local.y = ud_p_local.y*cam_mat.at<double>(1,1)+cam_mat.at<double>(1,2); 

    // Redistort Point 
    cv::Point2d p_local = distortPoint(ud_p_local, cam_mat,dist_coefs); 

    // Display results 
    std::cout<<"Distorted original point: "<<p0<<std::endl; 
    std::cout<<"Undistorted point (CV): "<<ud_p<<std::endl; 
    std::cout<<"Distorted point (CV): "<<p<<std::endl; 
    std::cout<<"Erorr in the distorted point (CV): "<<sqrt(pow(p.x-p0.x,2)+pow(p.y-p0.y,2))<<std::endl; 
    std::cout<<"Undistorted point (Local): "<<ud_p_local<<std::endl; 
    std::cout<<"Distorted point (Local): "<<p_local<<std::endl; 
    std::cout<<"Erorr in the distorted point (Local): "<<sqrt(pow(p_local.x-p0.x,2)+pow(p_local.y-p0.y,2))<<std::endl; 

    // Exit 
    return 0; 
} 
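(For reference, this should build as a single file against OpenCV 2.4.x, where cv::undistortPoints and the C-API types used above are available; something like g++ main.cpp $(pkg-config --cflags --libs opencv) should work, though the exact package name for pkg-config is an assumption and may differ on your system.)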
I think the official OpenCV forum is a better place for this question: http://answers.opencv.org/questions/ – KjMag

I asked it there: http://answers.opencv.org/question/147530/undistortpoints-not-giving-the-exact-inverse-of-distortion-model/ – apalomer

Answer


As suggested, you may get the real motivation from the OpenCV forum. Historically, though, OpenCV has been developed with real-time or near-real-time applications in mind (for example, the DARPA Grand Challenge), so it is easy to find code in it that optimizes for speed over accuracy.
