5. Structured Light: Monocular Calibration (OpenCV)

    xiaoxiao  2021-03-26

    Referring to the related code, the monocular camera calibration code based on OpenCV (legacy C API) is as follows:

#include "stdafx.h"
#include <iostream>
// OpenCV headers (legacy C API)
#include <cv.h>
#include <highgui.h>
#include <cxcore.h>
using namespace std;
using namespace cv;

// Setting 1: size of the images to be calibrated
int image_width  = 2592;
int image_height = 1944;
// Setting 2: number of inner corners along X
const int ChessBoardSize_w = 9;
// Setting 3: number of inner corners along Y
const int ChessBoardSize_h = 6;
// Corner grid of one image and total number of corners per image
const CvSize ChessBoardSize = cvSize(ChessBoardSize_w, ChessBoardSize_h);
const int NPoints = ChessBoardSize_w * ChessBoardSize_h;
// Setting 4: number of calibration images
const int NImages = 37;
// Setting 5: side length of a chessboard square, in mm
float SquareWidth = 10;

// Calibration results
int corner_count[NImages] = {0};
CvMat *intrinsics;          // intrinsic matrix
CvMat *distortion_coeff;    // distortion coefficients
CvMat *rotation_vectors;    // rotation vectors
CvMat *translation_vectors; // translation vectors
CvMat *object_points;
CvMat *point_counts;
CvMat *image_points;
CvMat *image_points2;

// Compute the world coordinates of the chessboard corners
void InitCorners3D(CvMat *Corners3D, CvSize ChessBoardSize, int Nimages, float SquareSize);

int main(void)
{
    IplImage *current_frame_rgb;             // image with the detected corners drawn on it
    IplImage *currentsamall_frame_rgb;       // downscaled copy, convenient for display
    IplImage *current_frame_gray;            // grayscale image used for corner detection
    IplImage *chessBoard_Img;                // original loaded image
    CvPoint2D32f corners[NPoints * NImages]; // all corner coordinates

    // Create the images
    chessBoard_Img = cvCreateImage(cvSize(image_width, image_height), IPL_DEPTH_8U, 3);
    current_frame_gray = cvCreateImage(cvSize(image_width, image_height), IPL_DEPTH_8U, 1);
    current_frame_rgb = cvCreateImage(cvSize(image_width, image_height), IPL_DEPTH_8U, 3);
    currentsamall_frame_rgb = cvCreateImage(cvSize(image_width / 2, image_height / 2), IPL_DEPTH_8U, 3);

    // Index of the current image
    int captured_frames = 0;
    for (captured_frames = 0; captured_frames < NImages; captured_frames++)
    {
        // Build the file name
        string img_name;
        char s[12];
        _itoa(captured_frames + 1, s, 10);
        img_name = s;
        //img_name = "./cam1_gray/pattern_cam1_im" + img_name + ".png";
        img_name = "./cam2_gray/pattern_cam2_im" + img_name + ".png";
        const char *filename = img_name.c_str();
        chessBoard_Img = cvLoadImage(filename, 1);
        if (chessBoard_Img == NULL)
        {
            printf("load image failed!");
        }
        cvCvtColor(chessBoard_Img, current_frame_gray, CV_BGR2GRAY);
        cvCopy(chessBoard_Img, current_frame_rgb, 0);

        // Find the chessboard corners
        int find_corners_result;
        find_corners_result = cvFindChessboardCorners(
            current_frame_gray,                  // current image
            ChessBoardSize,                      // corners per row and per column of the board
            &corners[captured_frames * NPoints], // detected corners
            &corner_count[captured_frames],      // output: number of corners found in this image
            CV_CALIB_CB_ADAPTIVE_THRESH);        // use adaptive thresholding

        // Refine the corner locations to sub-pixel accuracy
        cvFindCornerSubPix(
            current_frame_gray,                  // current image
            &corners[captured_frames * NPoints], // detected corners
            NPoints,                             // number of corners
            cvSize(5, 5), cvSize(-1, -1),        // search window; no dead zone around each corner
            cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 30, 0.01)); // stop after 30 iterations or epsilon 0.01

        // Draw the detected chessboard corners
        cvDrawChessboardCorners(current_frame_rgb, ChessBoardSize,
            &corners[captured_frames * NPoints], NPoints, find_corners_result);
        cvSaveImage("corner_drawing.bmp", current_frame_rgb);
        //cvResize(current_frame_rgb, currentsamall_frame_rgb);
        //cvNamedWindow("result", CV_WINDOW_AUTOSIZE);
        //cvShowImage("result", currentsamall_frame_rgb);
        cvWaitKey(100);
    }

    intrinsics = cvCreateMat(3, 3, CV_32FC1);
    distortion_coeff = cvCreateMat(1, 5, CV_32FC1);
    rotation_vectors = cvCreateMat(NImages, 3, CV_32FC1);
    translation_vectors = cvCreateMat(NImages, 3, CV_32FC1);
    point_counts = cvCreateMat(NImages, 1, CV_32SC1);
    object_points = cvCreateMat(NImages * NPoints, 3, CV_32FC1);
    image_points = cvCreateMat(NImages * NPoints, 2, CV_32FC1);
    image_points2 = cvCreateMat(NImages * NPoints, 2, CV_32FC1);

    // Generate the 3D world coordinates of the corners (output: object_points)
    InitCorners3D(object_points, ChessBoardSize, NImages, SquareWidth);
    // Copy corners into image_points
    cvSetData(image_points, corners, sizeof(CvPoint2D32f));
    // Copy corner_count into point_counts (number of corners in each image)
    cvSetData(point_counts, &corner_count, sizeof(int));

    // Compute the intrinsic parameters
    cvCalibrateCamera2(
        object_points,
        image_points,
        point_counts,        // number of points in each view
        cvSize(image_width, image_height),
        intrinsics,
        distortion_coeff,
        rotation_vectors,
        translation_vectors,
        0);

    float intr[3][3] = {0.0};
    float dist[5] = {0.0};   // five distortion coefficients (k1, k2, p1, p2, k3)
    float tranv[3] = {0.0};
    float rotv[3] = {0.0};
    for (int i = 0; i < 3; i++)
    {
        for (int j = 0; j < 3; j++)
        {
            intr[i][j] = ((float*)(intrinsics->data.ptr + intrinsics->step * i))[j];
        }
        dist[i] = ((float*)(distortion_coeff->data.ptr))[i];
        tranv[i] = ((float*)(translation_vectors->data.ptr))[i];
        rotv[i] = ((float*)(rotation_vectors->data.ptr))[i];
    }
    dist[3] = ((float*)(distortion_coeff->data.ptr))[3];
    dist[4] = ((float*)(distortion_coeff->data.ptr))[4];

    // Convert the rotation vector of the first image to a rotation matrix
    double R_matrix[9];
    double r_vec[3] = { rotv[0], rotv[1], rotv[2] }; // double buffer to match the CV_64FC1 header
    CvMat pr_vec;
    CvMat pR_matrix;
    cvInitMatHeader(&pr_vec, 1, 3, CV_64FC1, r_vec, CV_AUTOSTEP);
    cvInitMatHeader(&pR_matrix, 3, 3, CV_64FC1, R_matrix, CV_AUTOSTEP);
    cvRodrigues2(&pr_vec, &pR_matrix, 0);

    // Print the calibration results
    printf("-----------------------------------------\n");
    printf("INTRINSIC MATRIX: \n");
    printf("[ %6.4f %6.4f %6.4f ] \n", intr[0][0], intr[0][1], intr[0][2]);
    printf("[ %6.4f %6.4f %6.4f ] \n", intr[1][0], intr[1][1], intr[1][2]);
    printf("[ %6.4f %6.4f %6.4f ] \n", intr[2][0], intr[2][1], intr[2][2]);
    printf("----------------------------------------- \n");
    printf("DISTORTION VECTOR: \n");
    printf("[ %6.4f %6.4f %6.4f %6.4f %6.4f ] \n", dist[0], dist[1], dist[2], dist[3], dist[4]);
    printf("----------------------------------------- \n");
    printf("ROTATION VECTOR (IMAGE 1): \n");
    printf("[ %6.4f %6.4f %6.4f ] \n", rotv[0], rotv[1], rotv[2]);
    printf("TRANSLATION VECTOR (IMAGE 1): \n");
    printf("[ %6.4f %6.4f %6.4f ] \n", tranv[0], tranv[1], tranv[2]);
    printf("----------------------------------------- \n");
    printf("ROTATION MATRIX (IMAGE 1): \n");
    printf("[ %6.4f %6.4f %6.4f ] \n", R_matrix[0], R_matrix[1], R_matrix[2]);
    printf("[ %6.4f %6.4f %6.4f ] \n", R_matrix[3], R_matrix[4], R_matrix[5]);
    printf("[ %6.4f %6.4f %6.4f ] \n", R_matrix[6], R_matrix[7], R_matrix[8]);
    printf("----------------------------------------- \n");

    // Evaluate the calibration result via the reprojection error
    double total_err = 0.0;
    double err = 0.0;
    double point_cousum = 0;
    cout << "\tCalibration error of each image:\n";
    for (int i = 0; i < NImages; i++)
    {
        // Extract the per-image data: object_matrix, rotation_matrix, translation_matrix
        CvMat *object_matrix = cvCreateMat(NPoints, 3, CV_32FC1);
        CvMat *image_matrix = cvCreateMat(NPoints, 2, CV_32FC1);
        CvMat *project_image_matrix = cvCreateMat(NPoints, 2, CV_32FC1);
        CvMat *rotation_matrix_1 = cvCreateMat(1, 3, CV_32FC1);
        CvMat *translation_matrix_1 = cvCreateMat(1, 3, CV_32FC1);
        CvMat *rotation_matrix = cvCreateMat(3, 1, CV_32FC1);
        CvMat *translation_matrix = cvCreateMat(3, 1, CV_32FC1);

        cvGetRows(object_points, object_matrix, i * NPoints, (i + 1) * NPoints, 1);
        cvGetRow(rotation_vectors, rotation_matrix_1, i);
        cvReshape(rotation_matrix_1, rotation_matrix, 0, 3);
        cvGetRow(translation_vectors, translation_matrix_1, i);
        cvReshape(translation_matrix_1, translation_matrix, 0, 3);
        cvGetRows(image_points, project_image_matrix, i * NPoints, (i + 1) * NPoints, 1);

        // Reproject the 3D points; the projected points are stored in image_matrix
        cvProjectPoints2(object_matrix, rotation_matrix, translation_matrix,
            intrinsics, distortion_coeff, image_matrix, 0, 0, 0, 0, 0);
        err = cvNorm(image_matrix, project_image_matrix, CV_L2, 0);
        total_err += err * err;   // accumulate the squared reprojection error
        int n_points = ((int*)(point_counts->data.ptr + point_counts->step * i))[0];
        point_cousum = point_cousum + n_points;
        printf("Reprojection error of image %d: %f\n", i + 1, sqrt(err * err / n_points));
    }
    printf("Overall reprojection error: %f\n", sqrt(total_err / point_cousum));
    system("pause");

    cvReleaseMat(&intrinsics);
    cvReleaseMat(&distortion_coeff);
    cvReleaseMat(&rotation_vectors);
    cvReleaseMat(&translation_vectors);
    cvReleaseMat(&point_counts);
    cvReleaseMat(&object_points);
    cvReleaseMat(&image_points);
    cvDestroyAllWindows();
    return 0;
}

void InitCorners3D(CvMat *Corners3D, CvSize ChessBoardSize, int NImages, float SquareSize)
{
    int CurrentImage = 0;
    int CurrentRow = 0;
    int CurrentColumn = 0;
    int NPoints = ChessBoardSize.height * ChessBoardSize.width;
    float *temppoints = new float[NImages * NPoints * 3];
    // The board lies in the z = 0 plane; X follows the rows, Y the columns, in mm
    for (CurrentImage = 0; CurrentImage < NImages; CurrentImage++)
    {
        for (CurrentRow = 0; CurrentRow < ChessBoardSize.height; CurrentRow++)
        {
            for (CurrentColumn = 0; CurrentColumn < ChessBoardSize.width; CurrentColumn++)
            {
                temppoints[(CurrentImage * NPoints * 3) + (CurrentRow * ChessBoardSize.width + CurrentColumn) * 3]     = (float)CurrentRow * SquareSize;
                temppoints[(CurrentImage * NPoints * 3) + (CurrentRow * ChessBoardSize.width + CurrentColumn) * 3 + 1] = (float)CurrentColumn * SquareSize;
                temppoints[(CurrentImage * NPoints * 3) + (CurrentRow * ChessBoardSize.width + CurrentColumn) * 3 + 2] = 0.f;
            }
        }
    }
    (*Corners3D) = cvMat(NImages * NPoints, 3, CV_32FC1, temppoints);
}
