Capturing a Point Cloud with Kinect 2.0 and Displaying It in GLUT

Posted: 2021-09-21 15:57:47


This post documents how to generate a point cloud with Kinect 2.0.

The examples below were adapted from the samples shipped with the Kinect SDK; the name of the source sample is noted under each heading.

To build a point cloud we first need both the depth data and the color data of the frame; the two are then combined and converted into a point cloud.
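
Putting the steps below together, the per-frame flow looks roughly like this (a minimal sketch; the Update wrapper and its name are my own assumption, not part of the SDK samples):

// Hypothetical per-frame update: grab depth and color, then rebuild the cloud.
void CMotionRecognition::Update()
{
    if (SUCCEEDED(GetDepthImage()) &&    // step 1: depth data
        SUCCEEDED(GetColorImage()))      // step 2: color data
    {
        // Steps 3/4: the ConvertMat_* helpers have filled the member buffers,
        // so the frame can now be assembled into a renderable point cloud.
        osg::ref_ptr<osg::Node> cloud = AssembleAsPointCloud(0.0f, 0, 0, 1);
        // Hand `cloud` to whatever is rendering (GLUT, osgViewer, ...).
    }
}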

1. Getting the depth data:

  Adapted from the Depth Basics-D2D sample

HRESULT CMotionRecognition::GetDepthImage()
{
    if (!m_pDepthFrameReader)
    {
        return E_FAIL;
    }

    IDepthFrame * pDepthFrame = nullptr;
    HRESULT hr = m_pDepthFrameReader->AcquireLatestFrame(&pDepthFrame);

    if (SUCCEEDED(hr))
    {
        IFrameDescription * pFrameDescription = nullptr;
        USHORT nDepthMinReliableDistance = 0;
        USHORT nDepthMaxDistance = 0;
        UINT16 *pBuffer = NULL;
        UINT nBufferSize = 0;

        if (SUCCEEDED(hr))
        {
            hr = pDepthFrame->get_FrameDescription(&pFrameDescription);
        }
        if (SUCCEEDED(hr))
        {
            hr = pFrameDescription->get_Width(&nDepthWidth);
        }
        if (SUCCEEDED(hr))
        {
            hr = pFrameDescription->get_Height(&nDepthHeight);
        }
        if (SUCCEEDED(hr))
        {
            hr = pDepthFrame->get_DepthMinReliableDistance(&nDepthMinReliableDistance);
        }
        if (SUCCEEDED(hr))
        {
            // In order to see the full range of depth (including the less reliable far field depth)
            // we are setting nDepthMaxDistance to the extreme potential depth threshold
            nDepthMaxDistance = USHRT_MAX;

            // Note: If you wish to filter by reliable depth distance, uncomment the following line.
            //// hr = pDepthFrame->get_DepthMaxReliableDistance(&nDepthMaxDistance);
        }
        if (SUCCEEDED(hr))
        {
            hr = pDepthFrame->AccessUnderlyingBuffer(&nBufferSize, &pBuffer);
        }
        if (SUCCEEDED(hr))
        {
            ConvertMat_depth(pBuffer, nDepthMinReliableDistance, nDepthMaxDistance);
        }

        SafeRelease(pFrameDescription);
    }

    SafeRelease(pDepthFrame);
    return hr;
}
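
Both this function and GetColorImage below assume that the sensor and the two frame readers were opened beforehand and that the member buffers exist. A minimal sketch of that setup, with a hypothetical InitKinect method (SafeRelease is the helper used throughout the SDK samples; the Kinect v2 streams are fixed at 512x424 for depth and 1920x1080 for color):

// Release-and-null helper, as defined in the Kinect SDK samples.
template<class Interface>
inline void SafeRelease(Interface *& pInterfaceToRelease)
{
    if (pInterfaceToRelease != nullptr)
    {
        pInterfaceToRelease->Release();
        pInterfaceToRelease = nullptr;
    }
}

// Hypothetical initialization: open the sensor and both readers, allocate buffers.
HRESULT CMotionRecognition::InitKinect()
{
    HRESULT hr = GetDefaultKinectSensor(&m_pKinectSensor);
    if (FAILED(hr) || !m_pKinectSensor)
    {
        return E_FAIL;
    }
    hr = m_pKinectSensor->Open();

    IDepthFrameSource * pDepthFrameSource = nullptr;
    if (SUCCEEDED(hr))
    {
        hr = m_pKinectSensor->get_DepthFrameSource(&pDepthFrameSource);
    }
    if (SUCCEEDED(hr))
    {
        hr = pDepthFrameSource->OpenReader(&m_pDepthFrameReader);
    }
    SafeRelease(pDepthFrameSource);

    IColorFrameSource * pColorFrameSource = nullptr;
    if (SUCCEEDED(hr))
    {
        hr = m_pKinectSensor->get_ColorFrameSource(&pColorFrameSource);
    }
    if (SUCCEEDED(hr))
    {
        hr = pColorFrameSource->OpenReader(&m_pColorFrameReader);
    }
    SafeRelease(pColorFrameSource);

    // Member buffers sized for the fixed Kinect v2 stream resolutions.
    pDepthBuffer = new UINT16[512 * 424];
    m_pColorRGBX = new RGBQUAD[1920 * 1080];

    return hr;
}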

2. Getting the color data:

  Adapted from the Color Basics-D2D sample

HRESULT CMotionRecognition::GetColorImage()
{
    if (!m_pColorFrameReader)
    {
        return E_FAIL;
    }

    IColorFrame* pColorFrame = NULL;
    HRESULT hr = m_pColorFrameReader->AcquireLatestFrame(&pColorFrame);

    if (SUCCEEDED(hr))
    {
        INT64 nTime = 0;
        IFrameDescription* pFrameDescription = NULL;
        ColorImageFormat imageFormat = ColorImageFormat_None;
        UINT nBufferSize = 0;
        RGBQUAD *pBuffer = NULL;

        hr = pColorFrame->get_RelativeTime(&nTime);

        if (SUCCEEDED(hr))
        {
            hr = pColorFrame->get_FrameDescription(&pFrameDescription);
        }
        if (SUCCEEDED(hr))
        {
            hr = pFrameDescription->get_Width(&nColorWidth);
        }
        if (SUCCEEDED(hr))
        {
            hr = pFrameDescription->get_Height(&nColorHeight);
        }
        if (SUCCEEDED(hr))
        {
            hr = pColorFrame->get_RawColorImageFormat(&imageFormat);
        }
        if (SUCCEEDED(hr))
        {
            if (imageFormat == ColorImageFormat_Bgra)
            {
                // The frame is already BGRA; access the raw buffer directly.
                hr = pColorFrame->AccessRawUnderlyingBuffer(&nBufferSize, reinterpret_cast<BYTE**>(&pBuffer));
            }
            else if (m_pColorRGBX)
            {
                // Otherwise convert the frame into our own BGRA buffer.
                pBuffer = m_pColorRGBX;
                nBufferSize = nColorWidth * nColorHeight * sizeof(RGBQUAD);
                hr = pColorFrame->CopyConvertedFrameDataToArray(nBufferSize, reinterpret_cast<BYTE*>(pBuffer), ColorImageFormat_Bgra);
            }
            else
            {
                hr = E_FAIL;
            }
        }
        if (SUCCEEDED(hr))
        {
            ConvertMat_color(pBuffer, nColorWidth, nColorHeight);
        }

        SafeRelease(pFrameDescription);
    }

    SafeRelease(pColorFrame);
    return hr;
}

3. Processing the frame data

Steps 1 and 2 each call a ConvertMat_* function, which is responsible for processing the acquired frame data, since the point-cloud conversion needs both the depth data and the color data. Note that this is also a convenient place to create OpenCV Mats.

Here, however, only the case of copying the acquired data into the class member buffers (pDepthBuffer for depth, m_pColorRGBX for color) is shown.

ConvertMat_depth()

void CMotionRecognition::ConvertMat_depth(const UINT16* _pBuffer, USHORT nMinDepth, USHORT nMaxDepth)
{
    // nMinDepth / nMaxDepth are unused here; they would matter if clamping for display.
    const UINT16
        * pBuffer = _pBuffer,
        * pBufferEnd = _pBuffer + (nDepthWidth * nDepthHeight);

    UINT16 * pDepthBufferTmp = pDepthBuffer;

    // Copy the raw depth values into the class member buffer.
    while (pBuffer < pBufferEnd)
    {
        *pDepthBufferTmp = *pBuffer;
        ++pDepthBufferTmp;
        ++pBuffer;
    }
}

ConvertMat_color()

void CMotionRecognition::ConvertMat_color(const RGBQUAD* _pBuffer, int nWidth, int nHeight)
{
    const RGBQUAD
        * pBuffer = _pBuffer,
        * pBufferEnd = pBuffer + (nWidth * nHeight);

    RGBQUAD * pBufferTmp = m_pColorRGBX;

    // Copy the BGRA pixels into the class member buffer.
    while (pBuffer < pBufferEnd)
    {
        *pBufferTmp = *pBuffer;
        ++pBufferTmp;
        ++pBuffer;
    }
}
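
As noted above, these helpers are also a natural place to build OpenCV Mats from the same buffers. A minimal sketch, assuming OpenCV is available (the Mat headers wrap the existing memory without copying):

#include <opencv2/opencv.hpp>

// CV_16UC1 matches the raw 16-bit depth values; CV_8UC4 matches the BGRA RGBQUADs.
cv::Mat depthMat(nDepthHeight, nDepthWidth, CV_16UC1, pDepthBuffer);
cv::Mat colorMat(nColorHeight, nColorWidth, CV_8UC4, m_pColorRGBX);

// Scale the 16-bit depth down to 8 bits for quick display.
cv::Mat depthVis;
depthMat.convertTo(depthVis, CV_8UC1, 255.0 / 65535.0);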

4. Assembling the point cloud:

  Adapted from the Coordinate Mapping Basics-D2D sample

osg::ref_ptr<osg::Node> CMotionRecognition::AssembleAsPointCloud(float _angle, int _axisX, int _axisY, int _axisZ)
{
    if (!m_pKinectSensor)
    {
        // The function returns a node, so failure is signalled with nullptr.
        return nullptr;
    }

    // osg 3D coordinates
    osg::ref_ptr<osg::Vec3Array> point3dVec = new osg::Vec3Array();
    // osg color values
    osg::ref_ptr<osg::Vec4Array> colorVec = new osg::Vec4Array();

    ICoordinateMapper * m_pCoordinateMapper = nullptr;
    HRESULT hr = m_pKinectSensor->get_CoordinateMapper(&m_pCoordinateMapper);

    for (size_t y = 0; y != nDepthHeight; y++)
    {
        for (size_t x = 0; x != nDepthWidth; x++)
        {
            DepthSpacePoint depthSpacePoint = { static_cast<float>(x), static_cast<float>(y) };
            UINT16 currDepth = pDepthBuffer[y * nDepthWidth + x];

            // Coordinate Mapping Depth to Color Space
            ColorSpacePoint colorSpacePoint = { 0.0f, 0.0f };
            m_pCoordinateMapper->MapDepthPointToColorSpace(depthSpacePoint, currDepth, &colorSpacePoint);

            int colorX = static_cast<int>(std::floor(colorSpacePoint.X + 0.5f)),
                colorY = static_cast<int>(std::floor(colorSpacePoint.Y + 0.5f));

            if ((0 <= colorX) && (colorX < nColorWidth) && (0 <= colorY) && (colorY < nColorHeight))
            {
                // RGBQUAD stores BGRA, so reorder into RGBA for osg::Vec4f.
                RGBQUAD color = m_pColorRGBX[colorY * nColorWidth + colorX];
                colorVec->push_back(osg::Vec4f((float)color.rgbRed / 255.0f, (float)color.rgbGreen / 255.0f, (float)color.rgbBlue / 255.0f, 1.0f));
            }

            // Coordinate Mapping Depth to Camera Space
            CameraSpacePoint cameraSpacePoint = { 0.0f, 0.0f, 0.0f };
            m_pCoordinateMapper->MapDepthPointToCameraSpace(depthSpacePoint, currDepth, &cameraSpacePoint);

            // Same bounds check as above so every vertex gets a matching color.
            if ((0 <= colorX) && (colorX < nColorWidth) && (0 <= colorY) && (colorY < nColorHeight))
            {
                point3dVec->push_back(osg::Vec3(cameraSpacePoint.X, cameraSpacePoint.Y, cameraSpacePoint.Z));
            }
        }
    }

    SafeRelease(m_pCoordinateMapper);

    // Leaf node
    osg::ref_ptr<osg::Geode> geode = new osg::Geode();
    // Holds the geometry: the vertex array data plus the commands that render it
    osg::ref_ptr<osg::Geometry> geom = new osg::Geometry();

    geom->setVertexArray(point3dVec.get());
    geom->setColorArray(colorVec.get());
    // One color per vertex
    geom->setColorBinding(osg::Geometry::BIND_PER_VERTEX);
    // Specify how the vertex data is drawn
    geom->addPrimitiveSet(new osg::DrawArrays(osg::PrimitiveSet::POINTS, 0, point3dVec->size()));
    // Attach the drawable to the Geode
    geode->addDrawable(geom.get());

    return geode;
}
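
Since the function returns an osg::Node, the quickest way to sanity-check a frame is an osgViewer window before wiring things into the GLUT pipeline (a minimal sketch; motionRecognition is a hypothetical instance of CMotionRecognition):

#include <osgViewer/Viewer>

// Preview one assembled frame in an osgViewer window.
osgViewer::Viewer viewer;
viewer.setSceneData(motionRecognition.AssembleAsPointCloud(0.0f, 0, 0, 1).get());
viewer.run();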

Here is the result displayed with GLUT:

[Screenshot: the captured point cloud rendered in GLUT]

As you can see, the frame rate is only about 2.1 FPS, and it will only drop further once additional processing is added later.

If anyone has a better way to generate the point cloud, feel free to leave a comment :)