openNI kinect embedded registration

June 30, 2011

Thanks to Nicolas — OpenNI already provides the registration API!

here is the code:

	// Locate the existing depth and image generator nodes in the OpenNI context.
	nRetVal = context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_DepthGenerator);
	CHECK_RC(nRetVal, "Find depth generator");
	nRetVal = context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_ImageGenerator);
	CHECK_RC(nRetVal, "Find image generator");
	// Built-in registration: reproject the depth stream into the RGB
	// camera's viewpoint, so depth and color pixels line up.
	g_DepthGenerator.GetAlternativeViewPointCap().SetViewPoint(g_ImageGenerator);

warp depth to color.

then

  // Foreground cutout: copy a color pixel only where the (warped,
  // normalized-RGB) depth value exceeds the threshold.
  for(int j=0; j<kin_h; j++)
  {
	  for(int i=0; i<kin_w; i++)
	  {
		  // depthImage is the depth map already warped into the color frame;
		  // channel 0 holds the normalized depth used for thresholding.
		  Vec3b xyz = depthImage.at<Vec3b>(j, i);
		  if(xyz[0]>180)   // simple distance threshold
			  cutoutImage.at<Vec3b>(j, i) = kinColor.at<Vec3b>(j, i);
	  }
  }
#if showWin
	// Side-by-side visualization: cutout (left) vs. raw color (right).
	Mat depth_color(480, 1280, CV_8UC3);
	cutoutImage.copyTo(depth_color(Rect(0, 0, 640, 480)));
	kinColor.copyTo(depth_color(Rect(640, 0, 640, 480)));
	imshow("depth_In_color", depth_color);
	//imwrite("./depth_In_color2.jpg", depth_color);
	waitKey(1);
#endif  // showWin (missing in the original snippet)

I just use simple thresholding — normalized RGB depth > 180 — to get the result, which is well registered! Cheers.

cutout result


OpenNI SDK for Kinect-part 2

June 27, 2011

As we know, the color image and the depth map don't line up, so the warping step is very important for registering the two kinds of information — especially for human segmentation using both color and depth.

here is the function I wrote:

void generatePointCloud()
{
    Point3f ptDepth3D;
    Point3f ptColor3D;
    Mat depthDisMtx = (Mat_<float>(1, 5) <<
                  -2.6389095690190378e-01, 9.9983033880181316e-01, -7.6323952014484080e-04, 5.0337278410637169e-03, -1.3056496956879815e+00);
    Mat colorIntMtx = (Mat_<float>(3, 3) <<
                  5.2921508098293293e+02, 0., 3.2894272028759258e+02,
                  0., 5.2556393630057437e+02, 2.6748068171871557e+02,
                  0., 0., 1.);
    Mat colorDisMtx = (Mat_<float>(1, 5) <<
            2.6451622333009589e-01, -8.3990749424620825e-01, -1.9922302173693159e-03, 1.4371995932897616e-03, 9.1192465078713847e-01);
    float fx_d = depthIntMtx.at<float>(0,0);
    float fy_d = depthIntMtx.at<float>(1,1);
    float cx_d = depthIntMtx.at<float>(0,2);
    float cy_d = depthIntMtx.at<float>(1,2);

    float fx_rgb = colorIntMtx.at<float>(0,0);
    float fy_rgb = colorIntMtx.at<float>(1,1);
    float cx_rgb = colorIntMtx.at<float>(0,2);
    float cy_rgb = colorIntMtx.at<float>(1,2);
    double R[9] = {
          9.9984628826577793e-01, 1.2635359098409581e-03, -1.7487233004436643e-02,
          -1.4779096108364480e-03, 9.9992385683542895e-01, -1.2251380107679535e-02,
          1.7470421412464927e-02, 1.2275341476520762e-02, 9.9977202419716948e-01
          };
   Vec3d T(1.9985242312092553e-02, -7.4423738761617583e-04, -1.0916736334336222e-02);
   unsigned short d;
   for(int j=0; j<kin_h; j++)
   {
	   for(int i=0; i<kin_w; i++)
	   {
		   d = kinDepth.at<unsigned short>(j, i);
		   ptDepth3D.x = (i - cx_d) * d / fx_d;
		   ptDepth3D.y = (j - cy_d) * d / fy_d;
		   ptDepth3D.z = d;
		   ptColor3D.x = R[0]*ptDepth3D.x + R[1]*ptDepth3D.y + R[2]*ptDepth3D.z + T[0];
		   ptColor3D.y = R[3]*ptDepth3D.x + R[4]*ptDepth3D.y + R[5]*ptDepth3D.z + T[1];
		   ptColor3D.z = R[6]*ptDepth3D.x + R[7]*ptDepth3D.y + R[8]*ptDepth3D.z + T[2];
		   x_rgb = cx_rgb + (ptColor3D.x * fx_rgb / ptColor3D.z);
		   y_rgb = cy_rgb + (ptColor3D.y * fy_rgb / ptColor3D.z);
		   if (x_rgb>=0 && x_rgb<kin_w && y_rgb>=0 && y_rgb<kin_h)
		   {
                     Vec3b color = (Vec3b)kinColor.at<Vec3b>((int)ceil(y_rgb),(int)ceil(x_rgb);
                     kinColorInDepth.at<Vec3b>(j, i) = color;
                   }
	   }
   }
#if showWin
       Mat depth_color(480, 1280, CV_8UC3);
       kinColor.copyTo(depth_color(Rect(0, 0, 640, 48)));
       kinColorInDepth.copyTo(depth_color(Rect(640, 0, 640, 480)));
       cvWaitKey(1);
#endif
}

result is like this

left: depth in color; right: kinect color

distance thresholding cutout

PS: In this experiment I just wanted to learn the warping process; the calibration parameters are for another Kinect, not mine, so you will see the horizontal shift.

enjoying:)


OpenNI SDK for Kinect-part 1

June 27, 2011

Several months ago, I used the OpenNI SDK to get color and depth from Kinect sensors for human segmentation. Today I post some code here, which may be useful for newcomers doing basic programming with the Kinect ^-^

here is the function I wrote to get the color information, and convert to openCV Mat.

void get_KinectColorImg(ImageMetaData& _imageMD)
{
    // Convert the OpenNI RGB24 frame into the global BGR cv::Mat kinColor.
    // OpenNI delivers pixels in RGB order; OpenCV expects BGR, so the
    // channels are swapped while copying.
    kinColor.create(kin_h, kin_w, CV_8UC3);
    const XnRGB24Pixel* pRow = _imageMD.RGB24Data();
    for (unsigned int y = 0; y < kin_h; ++y)
    {
        cv::Vec3b* dst = kinColor.ptr<cv::Vec3b>(y);
        const XnRGB24Pixel* src = pRow;
        for (unsigned int x = 0; x < kin_w; ++x, ++src)
        {
            dst[x] = cv::Vec3b(src->nBlue, src->nGreen, src->nRed);
        }
        // Advance by the source row stride (may differ from kin_w).
        pRow += _imageMD.XRes();
    }
	//imwrite("./color.jpg", kinColor);
#if showWin
	imshow("original kinColor1", kinColor);
	cvWaitKey(1);
#endif
}

and next is function to get the depth information, and convert to openCV Mat,

void get_KinectDepthImg(DepthMetaData& _depthMD)
{
	// Copy the OpenNI depth frame into two global mats: kinDepth keeps the
	// raw unsigned-short readings, kinDepth_meter the float conversion
	// produced by raw_depth_to_meters().
	const XnDepthPixel* src = _depthMD.Data();
	for (int row = 0; row < kin_h; ++row)
	{
		unsigned short* raw = (unsigned short*)kinDepth.ptr(row);
		float* meters = (float*)kinDepth_meter.ptr(row);
		for (int col = 0; col < kin_w; ++col, ++src)
		{
			raw[col] = *src;
			meters[col] = raw_depth_to_meters(*src);
		}
	}
#if showWin
	//int f = _depthMD.YOffset();
	assignDepthHist(kinDepth);
	saveFrame_depth(_depthMD, kindepth_stro1);
	//showFrame_depth(kinDepth, kindepth_stro1);
	//drawDepthMap(kinDepth, kindepth_stro2);
#endif
}