激光相机数据融合(5)--Gazebo仿真数据融合

时间:2023-03-09 01:11:30
激光相机数据融合(5)--Gazebo仿真数据融合

这一节将用 ROS+Gazebo 环境获取激光点云,并用 PCL 和 OpenCV 处理,源代码在:https://github.com/ZouCheng321/5_laser_camera_sim

由于激光的视角远大于相机,所以我们使用了5个相机来获取图像,这类似于Ladybug相机:

激光相机数据融合(5)--Gazebo仿真数据融合

激光相机数据融合(5)--Gazebo仿真数据融合

相机获取的五张图像:

激光相机数据融合(5)--Gazebo仿真数据融合

接下来我们用来构建彩色点云:

相机与激光的位置变换:由于五个相机呈正五边形分布,这很容易求得:

  // Laser-to-camera extrinsics for the five cameras arranged as a regular
  // pentagon (one camera every 72 degrees around the vertical axis).
  //
  // NOTE(review): the integer literals (0 / ±1) were lost when this snippet
  // was published; they are reconstructed here from the surviving values
  // (sin 72 deg = 0.95105651629, cos 72 deg = 0.30901699437,
  //  sin 144 deg = 0.58778525229, cos 144 deg = -0.80901699437).
  // Confirm against the original repository.
  //
  // Row layout of each rt_k (camera k rotated by theta = 72 deg * k):
  //   row 0:  0, 0, -1, 0          (optical axis looks along -z)
  //   row 1: -sin(theta), cos(theta), 0, 0
  //   row 2:  cos(theta), sin(theta), 0, 0
  //   row 3:  0, 0, 0, 1
  // Translation is applied separately via tVec, so the 4th column is zero.
  Eigen::Matrix4f rt0, rt1, rt2, rt3, rt4;
  rt0 << 0,0,-1,0,              0,             1,0,0,              1,             0,0,0, 0,0,0,1;
  rt1 << 0,0,-1,0, -0.95105651629, 0.30901699437,0,0,  0.30901699437, 0.95105651629,0,0, 0,0,0,1;
  rt2 << 0,0,-1,0, -0.58778525229,-0.80901699437,0,0, -0.80901699437, 0.58778525229,0,0, 0,0,0,1;
  rt3 << 0,0,-1,0,  0.58778525229,-0.80901699437,0,0, -0.80901699437,-0.58778525229,0,0, 0,0,0,1;
  rt4 << 0,0,-1,0,  0.95105651629, 0.30901699437,0,0,  0.30901699437,-0.95105651629,0,0, 0,0,0,1;

  // Camera-to-laser transforms (the inverses), cached so that each direction
  // of the mapping is a plain lookup later on.
  Eigen::Matrix4f inv0, inv1, inv2, inv3, inv4;
  inv0 = rt0.inverse();
  inv1 = rt1.inverse();
  inv2 = rt2.inverse();
  inv3 = rt3.inverse();
  inv4 = rt4.inverse();

  RT.push_back(rt0);
  RT.push_back(rt1);
  RT.push_back(rt2);
  RT.push_back(rt3);
  RT.push_back(rt4);
  INV.push_back(inv0);
  INV.push_back(inv1);
  INV.push_back(inv2);
  INV.push_back(inv3);
  INV.push_back(inv4);

相机的内参,已经在仿真软件中设定:

 std::vector<cv::Point2d> imagePoints;

    // 3x3 pinhole intrinsic matrix, values as configured in the Gazebo camera
    // SDF. Only fx = fy = 476.715669286 survived publication of this snippet;
    // the zero/one entries are reconstructed from the standard pinhole layout.
    // NOTE(review): the principal point (cx, cy) was lost — 400,400 below
    // assumes an 800x800 image; TODO confirm against the camera SDF.
    cv::Mat intrisicMat(3, 3, cv::DataType<double>::type); // Intrinsic matrix
    intrisicMat.at<double>(0, 0) = 476.715669286; // fx
    intrisicMat.at<double>(1, 0) = 0;
    intrisicMat.at<double>(2, 0) = 0;

    intrisicMat.at<double>(0, 1) = 0;
    intrisicMat.at<double>(1, 1) = 476.715669286; // fy
    intrisicMat.at<double>(2, 1) = 0;

    intrisicMat.at<double>(0, 2) = 400; // cx — TODO confirm
    intrisicMat.at<double>(1, 2) = 400; // cy — TODO confirm
    intrisicMat.at<double>(2, 2) = 1;

    // No additional rotation beyond the rt_k extrinsics: zero Rodrigues vector.
    cv::Mat rVec(3, 1, cv::DataType<double>::type); // Rotation vector
    rVec.at<double>(0) = 0;
    rVec.at<double>(1) = 0;
    rVec.at<double>(2) = 0;

    // Laser-to-camera translation (meters). The y component was lost in
    // publication and is reconstructed as 0 — TODO confirm.
    cv::Mat tVec(3, 1, cv::DataType<double>::type); // Translation vector
    tVec.at<double>(0) = 0.4;
    tVec.at<double>(1) = 0;
    tVec.at<double>(2) = -0.1;

    // Simulated camera has an ideal lens: all distortion coefficients
    // (k1, k2, p1, p2, k3) are zero.
    cv::Mat distCoeffs(5, 1, cv::DataType<double>::type); // Distortion vector
    distCoeffs.at<double>(0) = 0;
    distCoeffs.at<double>(1) = 0;
    distCoeffs.at<double>(2) = 0;
    distCoeffs.at<double>(3) = 0;
    distCoeffs.at<double>(4) = 0;

去除相机后方的点云:

// Transform the laser cloud into camera `num`'s frame and keep only the
// points in front of the camera (z > 0), returning them as cv::Point3d for
// cv::projectPoints.
//
// NOTE: `cloud` is overwritten in place with the filtered, camera-frame
// points as a side effect — the caller relies on index correspondence
// between `cloud` and the returned vector.
std::vector<cv::Point3d> Generate3DPoints(pcl::PointCloud<pcl::PointXYZ>::Ptr cloud, int num)
{
    std::vector<cv::Point3d> points;
    pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_f(new pcl::PointCloud<pcl::PointXYZ>);

    // Bring the laser cloud into the selected camera's frame.
    // (The original snippet also built an unused local matrix `TR`; removed.)
    pcl::transformPointCloud(*cloud, *cloud_f, RT[num]);

    // Pass-through filter on z: discard everything behind the image plane.
    // NOTE(review): the upper limit was lost in publication; 100.0 is an
    // arbitrary "far enough" bound — confirm against the original code.
    pcl::PassThrough<pcl::PointXYZ> pass;
    pass.setInputCloud(cloud_f);
    pass.setFilterFieldName("z");
    pass.setFilterLimits(0.0, 100.0);
    //pass.setFilterLimitsNegative (true);
    pass.filter(*cloud);
    cout << "size:" << cloud->size() << endl;

    // Fixed: the original loop used `i <= size()`, reading one element past
    // the end on the final iteration; iterate [0, size) instead.
    points.reserve(cloud->points.size());
    for (size_t i = 0; i < cloud->points.size(); ++i)
    {
        points.push_back(cv::Point3d(cloud->points[i].x, cloud->points[i].y, cloud->points[i].z));
    }
    return points;
}

将前方的点云投影到相机平面,这里直接用opencv自带的projectPoints函数:

        // Project the camera-frame 3-D points onto the image plane with the
        // pinhole model (rVec/tVec pose, intrisicMat, distCoeffs); pixel
        // coordinates land in imagePoints, index-aligned with objectPoints.
        cv::projectPoints(objectPoints, rVec, tVec, intrisicMat, distCoeffs, imagePoints);

保留图像内的点云:

 // Keep only points whose projection lands inside the image, and color each
 // surviving 3-D point with the pixel it projects onto.
 // NOTE(review): the loop start and image bounds were lost in publication;
 // 0..800 below assumes an 800x800 Gazebo camera — TODO confirm.
 for (size_t i = 0; i < imagePoints.size(); i++)
{
    if (imagePoints[i].x >= 0 && imagePoints[i].x < 800 &&
        imagePoints[i].y >= 0 && imagePoints[i].y < 800)
    {
        pcl::PointXYZRGB point;
        point.x = cloud->points[i].x; // cloud and imagePoints are index-aligned
        point.y = cloud->points[i].y;
        point.z = cloud->points[i].z;
        // OpenCV stores pixels as BGR: channel 2 = red, 1 = green, 0 = blue.
        // NOTE(review): _I is indexed (x, y) here, but cv::Mat_ operator() is
        // (row, col) = (y, x) — verify the original indexing order.
        point.r = _I(round(imagePoints[i].x), round(imagePoints[i].y))[2];
        point.g = _I(round(imagePoints[i].x), round(imagePoints[i].y))[1];
        point.b = _I(round(imagePoints[i].x), round(imagePoints[i].y))[0];
        colored_cloud->points.push_back(point);
    }
}

最后显示所有点云:

  // Display the merged colored cloud until the user closes the window.
  pcl::visualization::PCLVisualizer viewer("Cloud viewer");
  viewer.addPointCloud(colored_cloud_sum, "sample cloud");
  // Black background; the literals were lost in publication and are
  // reconstructed as (0, 0, 0) — confirm against the original code.
  viewer.setBackgroundColor(0, 0, 0);
  while (!viewer.wasStopped())
      viewer.spinOnce();

要构建这个项目:

cd 5_laser_camera_sim
mkdir build
cd build
cmake ..
make
./color

将看到如下显示:

激光相机数据融合(5)--Gazebo仿真数据融合