First, building ORB_SLAM2 from the GitHub source is easy; just follow the instructions on the official GitHub page. Here are a few results on the TUM dataset:
desk:
room:
The results are excellent: the absolute error between the estimated trajectory and the ground truth really is as small as the paper claims. In my opinion, ORB_SLAM2 is the best visual SLAM system available today, and remarkably thorough in what it handles.
So how do we use it in a project of our own? Conveniently, the author provides libORB_SLAM2.so; together with the header System.h, we can drop ORB_SLAM2 into our own project as a single component. The stock source has no 3D mapping module, however, so some changes are needed. Dr. Gao provides a libORB_SLAM2.so with a 3D point-cloud mapping module added, after which we can pick whatever sensor suits our needs (Kinect, Xtion, or anything else that yields point clouds). One of his blog posts runs ORB_SLAM2 under ROS with a Kinect v2; today we will try it without ROS, using OpenNI2 to read the RGB and depth streams directly from an Xtion and reconstruct the environment (I had previously used OpenNI2 with an Xtion on Windows). My advice: go with either an Xtion or a Kinect v2. The original Kinect is the weakest option; the Xtion is lighter, and the Kinect v2 has higher resolution. (Source code download: CSDN; source code download: GitHub. The vocabulary file is too large to include, so download it from the official GitHub.)
Enough background; back to the main thread. In ORB_SLAM2/Examples/RGB-D/, create rgbd_xtion_cc.cpp:
#include <iostream>
#include <algorithm>
#include <fstream>
#include <chrono>

#include <OpenNI.h>

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>

#include <System.h>   // ORB_SLAM2

using namespace std;
using namespace openni;
using namespace cv;

// Print information about every connected OpenNI device
void showdevice()
{
    Array<DeviceInfo> aDeviceList;
    OpenNI::enumerateDevices(&aDeviceList);

    cout << "There are " << aDeviceList.getSize() << " depth device(s) connected." << endl;

    for (int i = 0; i < aDeviceList.getSize(); ++i)
    {
        cout << "Device " << i << endl;
        const DeviceInfo& rDevInfo = aDeviceList[i];
        cout << "Device name: " << rDevInfo.getName() << endl;
        cout << "Device ID:   " << rDevInfo.getUsbProductId() << endl;
        cout << "Vendor name: " << rDevInfo.getVendor() << endl;
        cout << "Vendor ID:   " << rDevInfo.getUsbVendorId() << endl;
        cout << "Device URI:  " << rDevInfo.getUri() << endl;
    }
}

// Create and start the depth and color streams, and register depth to color
Status initstream(Status& rc, Device& xtion, VideoStream& streamDepth, VideoStream& streamColor)
{
    rc = STATUS_OK;

    // Create the depth stream
    rc = streamDepth.create(xtion, SENSOR_DEPTH);
    if (rc == STATUS_OK)
    {
        // Configure the depth video mode: 640x480 @ 30 fps, 1 mm per unit
        VideoMode mModeDepth;
        mModeDepth.setResolution(640, 480);
        mModeDepth.setFps(30);
        mModeDepth.setPixelFormat(PIXEL_FORMAT_DEPTH_1_MM);
        streamDepth.setVideoMode(mModeDepth);
        streamDepth.setMirroringEnabled(false);   // no mirroring

        // Start the depth stream
        rc = streamDepth.start();
        if (rc != STATUS_OK)
        {
            cerr << "Could not start the depth stream: " << OpenNI::getExtendedError() << endl;
            streamDepth.destroy();
        }
    }
    else
    {
        cerr << "Could not create the depth stream: " << OpenNI::getExtendedError() << endl;
    }

    // Create the color stream
    rc = streamColor.create(xtion, SENSOR_COLOR);
    if (rc == STATUS_OK)
    {
        // Configure the color video mode the same way: 640x480 @ 30 fps, RGB888
        VideoMode mModeColor;
        mModeColor.setResolution(640, 480);
        mModeColor.setFps(30);
        mModeColor.setPixelFormat(PIXEL_FORMAT_RGB888);
        streamColor.setVideoMode(mModeColor);
        streamColor.setMirroringEnabled(false);   // no mirroring

        // Start the color stream
        rc = streamColor.start();
        if (rc != STATUS_OK)
        {
            cerr << "Could not start the color stream: " << OpenNI::getExtendedError() << endl;
            streamColor.destroy();
        }
    }
    else
    {
        cerr << "Could not create the color stream: " << OpenNI::getExtendedError() << endl;
    }

    if (!streamColor.isValid() || !streamDepth.isValid())
    {
        cerr << "Color or depth stream is invalid" << endl;
        OpenNI::shutdown();
        rc = STATUS_ERROR;
        return rc;
    }

    // Register the depth image to the color image so the two are aligned
    if (xtion.isImageRegistrationModeSupported(IMAGE_REGISTRATION_DEPTH_TO_COLOR))
    {
        xtion.setImageRegistrationMode(IMAGE_REGISTRATION_DEPTH_TO_COLOR);
    }

    return rc;
}

int main(int argc, char **argv)
{
    if (argc != 3)
    {
        cerr << endl << "Usage: ./rgbd_xtion_cc path_to_vocabulary path_to_settings" << endl;
        return 1;
    }

    // Create the ORB_SLAM2 system (arg 1: ORB vocabulary file, arg 2: Xtion settings file)
    ORB_SLAM2::System SLAM(argv[1], argv[2], ORB_SLAM2::System::RGBD, true);

    cout << endl << "-------" << endl;
    cout << "Opening Xtion ..." << endl;

    Status rc = STATUS_OK;

    // Initialize the OpenNI environment
    OpenNI::initialize();

    showdevice();

    // Declare and open the device
    Device xtion;
    const char * deviceURL = openni::ANY_DEVICE;   // device URI
    rc = xtion.open(deviceURL);

    VideoStream streamDepth;
    VideoStream streamColor;
    if (initstream(rc, xtion, streamDepth, streamColor) == STATUS_OK)   // initialize the streams
        cout << "Open Xtion Successfully!" << endl;
    else
    {
        cout << "Open Xtion Failed!" << endl;
        return 0;
    }

    // Main loop
    cv::Mat imRGB, imD;
    bool continueornot = true;

    // Read the streams frame by frame into VideoFrameRef objects
    VideoFrameRef frameDepth;
    VideoFrameRef frameColor;

    namedWindow("RGB Image", CV_WINDOW_AUTOSIZE);

    for (double index = 1.0; continueornot; index += 1.0)
    {
        rc = streamDepth.readFrame(&frameDepth);
        if (rc == STATUS_OK)
        {
            // Wrap the depth frame in a cv::Mat (16-bit, millimeters)
            imD = cv::Mat(frameDepth.getHeight(), frameDepth.getWidth(), CV_16UC1, (void*)frameDepth.getData());
        }

        rc = streamColor.readFrame(&frameColor);
        if (rc == STATUS_OK)
        {
            // Wrap the color frame and convert RGB to BGR for OpenCV
            const Mat tImageRGB(frameColor.getHeight(), frameColor.getWidth(), CV_8UC3, (void*)frameColor.getData());
            cvtColor(tImageRGB, imRGB, CV_RGB2BGR);
            imshow("RGB Image", imRGB);
        }

        SLAM.TrackRGBD(imRGB, imD, index);   // feed the RGB-D pair to ORB_SLAM2

        char c = cv::waitKey(5);
        switch (c)
        {
        case 'q':
        case 27:                  // 'q' or ESC: quit
            continueornot = false;
            break;
        case 'p':                 // 'p': pause
            cv::waitKey(0);
            break;
        default:
            break;
        }
    }

    // Stop all threads
    SLAM.Shutdown();
    SLAM.SaveTrajectoryTUM("trajectory.txt");
    cv::destroyAllWindows();

    return 0;
}

The idea is simple: first create the ORB_SLAM2 system, passing in the vocabulary and the Xtion/ORB settings file; then grab the color and depth images from the Xtion and hand them to the SLAM tracking thread, which estimates the pose (with the loop-closing thread and the g2o optimization running in their own threads alongside); the point clouds are fused into a common coordinate frame and displayed (declared and defined in pointcloudmapping.h / .cc).
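One thing worth knowing: System::TrackRGBD in ORB_SLAM2 returns the current camera pose as a cv::Mat (the world-to-camera transform Tcw; it is empty while tracking is lost or not yet initialized). If you want to use the pose inside the loop, here is a minimal sketch of how the call above could be extended:

// In the main loop, keep the return value of TrackRGBD instead of discarding it.
cv::Mat Tcw = SLAM.TrackRGBD(imRGB, imD, index);
if (!Tcw.empty())
{
    // Recover the camera center in world coordinates: Ow = -R^T * t
    cv::Mat Rcw = Tcw.rowRange(0,3).colRange(0,3);
    cv::Mat tcw = Tcw.rowRange(0,3).col(3);
    cv::Mat Ow  = -Rcw.t() * tcw;
    cout << "camera center: " << Ow.t() << endl;
}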
Then add the following to ORB_SLAM2/CMakeLists.txt:
find_package(OpenNI2 REQUIRED)
include_directories("/usr/include/openni2/")
LINK_LIBRARIES(${OpenNI2_LIBRARY})
target_link_libraries(${PROJECT_NAME} ${OpenNI2_LIBRARY})

add_executable(rgbd_xtion_cc Examples/RGB-D/rgbd_xtion_cc.cpp)
target_link_libraries(rgbd_xtion_cc ${PROJECT_NAME})

Now run cmake .. and make inside ORB_SLAM2/build/. When the build finishes, you will find the executable rgbd_xtion_cc in ORB_SLAM2/Examples/RGB-D/. (rgbd_tum runs the TUM dataset and rgbd_cc runs your own dataset; both work from pre-captured color and depth images.)
Finally, create the Xtion settings file xtion.yaml in ORB_SLAM2/Examples/RGB-D/ (it also contains the ORB parameters). Adjust the intrinsics (RGB intrinsics and distortion) according to your own calibration results (from OpenCV, ROS, MATLAB, etc.):
#--------------------------------------------------------------------------------------------
# Camera Parameters. xtion 640*480
#--------------------------------------------------------------------------------------------

# Camera calibration and distortion parameters (OpenCV)
Camera.fx: 558.341390
Camera.fy: 558.387543
Camera.cx: 314.763671
Camera.cy: 240.992295

Camera.k1: 0.062568
Camera.k2: -0.096148
Camera.p1: 0.000140
Camera.p2: -0.006248
Camera.k3: 0.000000

(The rest of the file, from Camera.width down to the point-cloud mapping resolution, is listed at the end of this post.)

Now let's run it. Open a terminal in ORB_SLAM2/ and enter:

./Examples/RGB-D/rgbd_xtion_cc Vocabulary/ORBvoc.txt Examples/RGB-D/xtion.yaml

The system loads the ORB vocabulary, opens the Xtion, processes the captured images, and displays the tracked corners, the trajectory, and the point cloud:
Press 'q' or ESC to quit; the estimated trajectory and the point-cloud .pcd file are saved automatically under ORB_SLAM2/ (with many frames, e.g. 3000, saving takes a while, around 20 s). Run pcl_viewer xx.pcd to inspect the result. The code that saves the optimized point cloud lives in pointcloudmapping.cc:
globalMap->clear();
for (size_t i = 0; i < keyframes.size(); i++)   // save the optimized pointcloud
{
    cout << "keyframe " << i << " ..." << endl;
    PointCloud::Ptr tp = generatePointCloud(keyframes[i], colorImgs[i], depthImgs[i]);
    PointCloud::Ptr tmp(new PointCloud());
    voxel.setInputCloud(tp);
    voxel.filter(*tmp);
    *globalMap += *tmp;
    viewer.showCloud(globalMap);
}
PointCloud::Ptr tmp(new PointCloud());
sor.setInputCloud(globalMap);
sor.filter(*tmp);
globalMap->swap(*tmp);
pcl::io::savePCDFileBinary("optimized_pointcloud.pcd", *globalMap);
cout << "Save point cloud file successfully!" << endl;

Close-up 1:
Close-up 2:
Close-up 3:
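Besides pcl_viewer, the saved .pcd can be loaded back into your own code. A minimal sketch with PCL, assuming the cloud was saved with pcl::PointXYZRGBA points (the type used by the point-cloud mapping module; adjust if yours differs):

#include <iostream>
#include <pcl/io/pcd_io.h>
#include <pcl/point_types.h>

int main()
{
    // Assumed point type: pcl::PointXYZRGBA
    pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZRGBA>);
    if (pcl::io::loadPCDFile("optimized_pointcloud.pcd", *cloud) == -1)
    {
        std::cerr << "Could not read optimized_pointcloud.pcd" << std::endl;
        return 1;
    }
    std::cout << "Loaded " << cloud->points.size() << " points" << std::endl;
    return 0;
}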
Next, I plan to improve the vocabulary: try adding words built from 3D feature descriptors, retrain it, and raise the matching precision. Stay tuned.
///
2016/6/23 update: many readers are missing FindOpenNI2.cmake, so CMake cannot find the OpenNI2 library. Here is a FindOpenNI2.cmake for you: copy the content into a new .cmake file and save it into /usr/share/cmake-2.8/Modules/.

#
# Try to find OpenNI2 library and include path.
# Once done this will define
#

FIND_PATH(OpenNI2_INCLUDE_PATH OpenNI.h
    /usr/include
    /usr/local/include
    /sw/include
    /opt/local/include
    DOC "The directory where OpenNI.h resides")

FIND_LIBRARY(OpenNI2_LIBRARY
    NAMES OpenNI2 openni2
    PATHS
    /usr/lib64
    /usr/lib
    /usr/local/lib64
    /usr/local/lib
    /sw/lib
    /opt/local/lib
    DOC "The OpenNI2 library")

IF (OpenNI2_INCLUDE_PATH)
    SET(OpenNI2_FOUND 1 CACHE STRING "Set to 1 if OpenNI2 is found, 0 otherwise")
ELSE (OpenNI2_INCLUDE_PATH)
    SET(OpenNI2_FOUND 0 CACHE STRING "Set to 1 if OpenNI2 is found, 0 otherwise")
ENDIF (OpenNI2_INCLUDE_PATH)

MARK_AS_ADVANCED(OpenNI2_FOUND)

1. In Camera.bf, b is the baseline (unit: meters) and f is the focal length fx (fx and fy differ little), so bf = b * f. Together with ThDepth it determines the usable range of depth points: bf * ThDepth / fx, i.e. roughly b * ThDepth. The baseline comes up mostly in stereo vision: in the stereo examples shipped with ORB-SLAM2, EuRoC.yaml has bf = 47.9, ThDepth = 35 and fx = 435.2, giving a usable depth of 47.9 * 35 / 435.2 = 3.85 m; KITTI.yaml has bf = 387.57, ThDepth = 40 and fx = 721.54, giving 387.57 * 40 / 721.54 = 21.5 m. For the Xtion here, the IR "baseline" (the term is a stretch) gives bf = 40, ThDepth = 50 and fx = 558.34, so the usable depth is 3.58 m (the official spec says 3.5 m). (Both notes are illustrated in the code sketch after note 2.)
2. DepthMapFactor: 1000.0 is easy to understand: the values in the depth image are the true 3D depth multiplied by 1000. For example, a depth value of 2683 means the point is 2.683 m away in real-world scale. The factor can be converted manually (e.g. with OpenCV's convertTo, which takes a scale factor); the TUM depth images use a DepthMapFactor of 5000, meaning 5000 depth units correspond to 1 m.
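A small sketch tying both notes together (the helper names are mine, not from ORB_SLAM2, which applies the equivalent logic internally from the settings file):

#include <iostream>
#include <opencv2/core/core.hpp>

// Note 1: points beyond bf * ThDepth / fx (~ baseline * ThDepth) are treated as "far".
double usableDepth(double bf, double thDepth, double fx)
{
    return bf * thDepth / fx;
}

// Note 2: convert a raw 16-bit depth map to meters by dividing by DepthMapFactor.
cv::Mat toMetricDepth(const cv::Mat& rawDepth, double depthMapFactor)
{
    cv::Mat metric;
    rawDepth.convertTo(metric, CV_32F, 1.0 / depthMapFactor);  // e.g. 2683 -> 2.683 m
    return metric;
}

int main()
{
    std::cout << usableDepth(47.9,   35.0, 435.20) << " m (EuRoC)" << std::endl;  // ~3.85
    std::cout << usableDepth(387.57, 40.0, 721.54) << " m (KITTI)" << std::endl;  // ~21.5
    std::cout << usableDepth(40.0,   50.0, 558.34) << " m (Xtion)" << std::endl;  // ~3.58
    return 0;
}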
Finally, the rest of xtion.yaml (this is the part the two notes above refer to):

Camera.width: 640
Camera.height: 480

# Camera frames per second
Camera.fps: 30.0

# IR projector baseline times fx (approx.)
Camera.bf: 40.0

# Color order of the images (0: BGR, 1: RGB. It is ignored if images are grayscale)
Camera.RGB: 0

# Close/Far threshold. Baseline times.
ThDepth: 50.0

# Depthmap values factor
DepthMapFactor: 1000.0

#--------------------------------------------------------------------------------------------
# ORB Parameters
#--------------------------------------------------------------------------------------------

# ORB Extractor: Number of features per image
ORBextractor.nFeatures: 1000

# ORB Extractor: Scale factor between levels in the scale pyramid
ORBextractor.scaleFactor: 1.2

# ORB Extractor: Number of levels in the scale pyramid
ORBextractor.nLevels: 8

# ORB Extractor: Fast threshold
# Image is divided in a grid. At each cell FAST are extracted imposing a minimum response.
# Firstly we impose iniThFAST. If no corners are detected we impose a lower value minThFAST
# You can lower these values if your images have low contrast
ORBextractor.iniThFAST: 20
ORBextractor.minThFAST: 7

#--------------------------------------------------------------------------------------------
# Viewer Parameters
#--------------------------------------------------------------------------------------------
Viewer.KeyFrameSize: 0.05
Viewer.KeyFrameLineWidth: 1
Viewer.GraphLineWidth: 0.9
Viewer.PointSize: 2
Viewer.CameraSize: 0.08
Viewer.CameraLineWidth: 3
Viewer.ViewpointX: 0
Viewer.ViewpointY: -0.7
Viewer.ViewpointZ: -1.8
Viewer.ViewpointF: 500

#--------------------------------------------------------------------------------------------
# PointCloud Mapping
#--------------------------------------------------------------------------------------------
PointCloudMapping.Resolution: 0.01
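One last remark on PointCloudMapping.Resolution: it is the leaf size (in meters) of the voxel filter that downsamples each keyframe's cloud before fusion, as seen in the save code earlier. A minimal sketch of the same filtering step, assuming pcl::PointXYZRGBA points:

#include <pcl/point_types.h>
#include <pcl/filters/voxel_grid.h>

// Downsample a cloud so that each 1 cm x 1 cm x 1 cm voxel keeps one point.
// 0.01 here plays the same role as PointCloudMapping.Resolution in xtion.yaml.
void downsample(pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud,
                pcl::PointCloud<pcl::PointXYZRGBA>& filtered)
{
    pcl::VoxelGrid<pcl::PointXYZRGBA> voxel;
    voxel.setLeafSize(0.01f, 0.01f, 0.01f);  // resolution in meters
    voxel.setInputCloud(cloud);
    voxel.filter(filtered);
}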