This is an old revision of the document!


Kinect Library

The official library for Kinect v2 can be downloaded here: https://www.microsoft.com/en-us/download/details.aspx?id=44561. The library is available for JavaScript, C# and C++: https://msdn.microsoft.com/en-us/library/dn758675.aspx A Python package is also available for interacting with the Kinect SDK: https://github.com/Kinect/PyKinect2 ===== C++ Library ===== ==== Configuration in Microsoft Visual Studio ==== In order to use the library in your code, you must first configure the project: * The 'include' path must be set in "Project->Properties->C++->General->Additional Include Directories". * The 'library' path must be set in "Project->Properties->Linker->General->Additional Library Directories". * The library name must be set in "Project->Properties->Linker->Input->Additional Dependencies". The library name is "kinect20.lib". Once the configuration is done, you can start to use the library in your code. ==== Initializing the Kinect ==== The first thing to do is to include the corresponding header <code cpp> #include "kinect.h" </code> To manage data from the Kinect you need to create and initialize the IKinectSensor object <code cpp> IKinectSensor* kinect; HRESULT hr; hr = GetDefaultKinectSensor(&kinect); </code> If the Kinect is correctly initialized, you can now open it and initialize the desired streams such as color, depth and skeleton. Note that the coordinate mapper is also initialized. It will be used to map the 3D skeleton to 2D images for display purposes. 
<code cpp> hr = kinect->Open(); // Color stream IColorFrameSource* pColorFrameSource = NULL; IColorFrameReader* colorReader = NULL; if (SUCCEEDED(hr)){ hr = kinect->get_ColorFrameSource(&pColorFrameSource); } if (SUCCEEDED(hr)){ hr = pColorFrameSource->OpenReader(&colorReader); } // Depth stream IDepthFrameSource* pDepthFrameSource = NULL; IDepthFrameReader* depthReader = NULL; if (SUCCEEDED(hr)){ hr = kinect->get_DepthFrameSource(&pDepthFrameSource); } if (SUCCEEDED(hr)){ hr = pDepthFrameSource->OpenReader(&depthReader); } // Skeleton stream IBodyFrameSource* pBodyFrameSource = NULL; IBodyFrameReader* bodyReader = NULL; if (SUCCEEDED(hr)){ hr = kinect->get_BodyFrameSource(&pBodyFrameSource); } if (SUCCEEDED(hr)){ hr = pBodyFrameSource->OpenReader(&bodyReader); } // Coordinate Mapper ICoordinateMapper* coordinateMapper; if (SUCCEEDED(hr)){ hr = kinect->get_CoordinateMapper(&coordinateMapper); } </code> ==== Getting data ==== For getting the current data captured by the kinect, you need to call the 'AcquireLatestFrame' function for each of the readers. <code cpp> // Color data IColorFrame* pColorFrame = NULL; hr = colorReader->AcquireLatestFrame(&pColorFrame); // Depth data IDepthFrame* pDepthFrame = NULL; hr = depthReader->AcquireLatestFrame(&pDepthFrame); // Skeleton data IBodyFrame* pBodyFrame = NULL; hr = bodyReader->AcquireLatestFrame(&pBodyFrame); </code> ==== Processing data ==== In your code you may need to process data. Here is an example of how to proceed in order to display the frames using OpenCV 2.4.x. OpenCV needs to be installed and included in the header. It can be downloaded here: http://opencv.org/downloads.html === Processing and displaying color data === Here we assume that the color frame has been captured in 'pColorFrame'. 
<code cpp> cv::Mat tmp; RGBQUAD *pColorBuffer = new RGBQUAD[1920 * 1080]; UINT nColorBufferSize = 0; nColorBufferSize = 1920 * 1080 * sizeof(RGBQUAD); hr = pColorFrame->CopyConvertedFrameDataToArray(nColorBufferSize, reinterpret_cast<BYTE*>(pColorBuffer), ColorImageFormat_Bgra); if (SUCCEEDED(hr)){ tmp = cv::Mat(cv::Size(1920, 1080), CV_8UC4, pColorBuffer, cv::Mat::AUTO_STEP); if (tmp.rows != 0){

cv::imshow("Color", tmp);
char key = cv::waitKey(10);
  }

} SafeRelease(pColorFrame); delete[] pColorBuffer; </code> === Processing and displaying depth data === Here we assume that the depth frame has been captured in 'pDepthFrame'. In this case, the received data are depth values in millimeters. For display purposes, the conversion to pixel values is needed. This is done using minimum and maximum depth values.

USHORT nDepthMaxDistance = 0;
USHORT nDepthMinDistance = 0;
if (SUCCEEDED(hr)){
    hr = pDepthFrame->get_DepthMaxReliableDistance(&nDepthMaxDistance);
}
if (SUCCEEDED(hr)){
    hr = pDepthFrame->get_DepthMinReliableDistance(&nDepthMinDistance);
}
cv::Mat depthImg;
UINT nBufferSize = 424*512;
UINT16*	m_Buffer = new UINT16[424 * 512];
RGBQUAD* pixelData = new RGBQUAD[424 * 512];
if (SUCCEEDED(hr)){
    hr = pDepthFrame->CopyFrameDataToArray(512 * 424, m_Buffer);
    for (int j = 0; j < nBufferSize; j=j+1){
	USHORT dval = m_Buffer[j];
	BYTE intensity = (BYTE)(dval >= nDepthMinDistance && dval <= nDepthMaxDistance ? dval : 0);
	pixelData[j].rgbBlue = intensity;
	pixelData[j].rgbRed = intensity;
	pixelData[j].rgbGreen = intensity;
    }
    depthImg = cv::Mat(cv::Size(512, 424), CV_8UC4, pixelData, cv::Mat::AUTO_STEP);
    cv::imshow("Depth", depthImg);
    key = cv::waitKey(10);
}
SafeRelease(pDepthFrame);
delete(m_Buffer);
delete(pixelData);

=== Processing and displaying skeleton data === Here we assume that the skeleton frame has been captured in 'pBodyFrame'. For better clarity, two external functions are employed for drawing the skeleton ('drawSkeleton' and 'drawLine').

cv::namedWindow("Skeleton");
cv::Mat skeletonFrame(424, 512, CV_8UC3, cv::Scalar(0, 0, 0));
IBody* ppBodies[BODY_COUNT] = { 0 };
if (SUCCEEDED(hr)){
    hr = pBodyFrame->GetAndRefreshBodyData(_countof(ppBodies), ppBodies);
}
if (SUCCEEDED(hr)){
    for (int i = 0; i < BODY_COUNT; i=i+1){
	IBody* pBody = ppBodies[i];
	if (pBody){
		BOOLEAN tracked;
		hr = pBody->get_IsTracked(&tracked);
		if (SUCCEEDED(hr) && tracked){
			Joint joints[JointType_Count];
			hr = pBody->GetJoints(_countof(joints), joints);
			if (SUCCEEDED(hr)){
				for (int j = 0; j < _countof(joints); j=j+1){
					const CameraSpacePoint position3D = joints[j].Position;
					DepthSpacePoint position2D;
					coordinateMapper->MapCameraPointToDepthSpace(position3D, &position2D);
					cv::circle(skeletonFrame, cv::Point(position2D.X, position2D.Y), 5, cv::Scalar(255, 255, 255), -1);
					}
					drawSkeleton(skeletonFrame, joints, coordinateMapper);
				}
			}
		}
	}
}
cv::imshow("Skeleton", skeletonFrame);
key = cv::waitKey(10);
for (int i = 0; i < _countof(ppBodies); i=i+1){
    SafeRelease(ppBodies[i]);
}
SafeRelease(pBodyFrame);

The two external functions are

// Draw the bone between two joints onto 'src'. The 3D camera-space joint
// positions are mapped to 2D depth-space coordinates before drawing.
// The bone is drawn only when BOTH joints are fully tracked
// (TrackingState: 0 = NotTracked, 1 = Inferred, 2 = Tracked).
void drawLine(cv::Mat src, Joint j1, Joint j2, ICoordinateMapper* mapper){
	// Guard clause: skip bones with an untracked or merely inferred endpoint.
	if (j1.TrackingState <= 1 || j2.TrackingState <= 1){
		return;
	}
	DepthSpacePoint start2D;
	DepthSpacePoint end2D;
	mapper->MapCameraPointToDepthSpace(j1.Position, &start2D);
	mapper->MapCameraPointToDepthSpace(j2.Position, &end2D);
	cv::line(src, cv::Point(start2D.X, start2D.Y), cv::Point(end2D.X, end2D.Y), cv::Scalar(255, 255, 255), 5);
}
 
// Draw the whole skeleton by connecting pairs of joints with drawLine.
// drawLine itself skips any bone whose endpoints are not fully tracked.
void drawSkeleton(cv::Mat src, Joint joints[JointType_Count], ICoordinateMapper* mapper){
	// Bone list as pairs of joint indices, drawn in the same order as the
	// original hand-written call sequence.
	static const int bones[][2] = {
		// spine/head chain
		{ 0,  1}, { 1,  2}, { 2,  3},
		// first arm chain
		{ 2,  4}, { 4,  5}, { 5,  6}, { 6,  7},
		// second arm chain
		{ 2,  8}, { 8,  9}, { 9, 10}, {10, 11},
		// first leg chain
		{ 0, 12}, {12, 13}, {13, 14}, {14, 15},
		// second leg chain
		{ 0, 16}, {16, 17}, {17, 18}, {18, 19},
	};
	for (size_t b = 0; b < sizeof(bones) / sizeof(bones[0]); ++b){
		drawLine(src, joints[bones[b][0]], joints[bones[b][1]], mapper);
	}
}

=== Safe Release === Note that each frame is safely released after being processed. This is done using the following function

// Safe release for COM interfaces: calls Release() on a non-null interface
// pointer and nulls out the caller's pointer so it cannot be reused.
template<class Interface>
inline void SafeRelease(Interface *& pInterfaceToRelease){
    if (pInterfaceToRelease == NULL){
	return;
    }
    pInterfaceToRelease->Release();
    pInterfaceToRelease = NULL;
}

==== Closing the Kinect ==== At the end of the code, we correctly close the kinect sensor

kinect->Close();

===== Python package ===== A python package is available in order to use the kinect SDK in python. It can be downloaded here: https://github.com/Kinect/PyKinect2 ==== Initialisation ==== You need to initialize the kinect with the desired sources.

from pykinect2 import PyKinectV2
from pykinect2.PyKinectV2 import *
from pykinect2 import PyKinectRuntime
 
kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Color | PyKinectV2.FrameSourceTypes_Depth | PyKinectV2.FrameSourceTypes_Body)

==== Getting data ====

# color data
if kinect.has_new_color_frame():
    frame = kinect.get_last_color_frame()

# depth data
if kinect.has_new_depth_frame():
    frameD = kinect.get_last_depth_frame()

# skeleton data
if kinect.has_new_body_frame():
    bodies = kinect.get_last_body_frame()  # fixed: the original called get_last_color_frame()

==== Processing data ==== Here are some examples of how to process the data in order to display it using pygame. === Color data === This example shows how to display the color frame using pygame. The color frame has been captured in 'frame'.

pygame.init()
# Window sized to half the native desktop resolution.
# (The original snippet used self._infoObject / self._kinect / self._screen /
# self._frame_surface from the PyKinect2 class example; names are unified here
# so the snippet is self-contained and consistent with 'kinect' above.)
infoObject = pygame.display.Info()
screen = pygame.display.set_mode((infoObject.current_w >> 1, infoObject.current_h >> 1),
                                 pygame.HWSURFACE | pygame.DOUBLEBUF | pygame.RESIZABLE, 32)
# back buffer surface for getting Kinect color frames, 32bit color, width and height equal to the Kinect color frame size
frame_surface = pygame.Surface((kinect.color_frame_desc.Width, kinect.color_frame_desc.Height), 0, 32)

# Copy the raw color frame bytes straight into the surface's pixel buffer.
frame_surface.lock()
address = kinect.surface_as_array(frame_surface.get_buffer())
ctypes.memmove(address, frame.ctypes.data, frame.size)
del address
frame_surface.unlock()

# --- copy back buffer surface pixels to the screen, resize it if needed and keep aspect ratio
# --- (screen size may be different from Kinect's color frame size)
h_to_w = float(frame_surface.get_height()) / frame_surface.get_width()
target_height = int(h_to_w * screen.get_width())
surface_to_draw = pygame.transform.scale(frame_surface, (screen.get_width(), target_height))
screen.blit(surface_to_draw, (0, 0))
surface_to_draw = None
pygame.display.update()

# --- Go ahead and update the screen with what we've drawn.
pygame.display.flip()

=== Skeleton data === This code shows how to add skeleton data to the color image. Skeletons have been captured in 'bodies'. It must be implemented before copying back buffer surface pixels to the screen (see above).

# --- draw skeletons to _frame_surface
# Iterates over every body slot the sensor supports; slots that are not
# currently tracking a person are skipped.
if bodies is not None: 
      for i in range(0, kinect.max_body_count):
           body = bodies.bodies[i]
           if not body.is_tracked: 
               continue
 
           joints = body.joints
           # NOTE(review): 'ori' (joint orientations) is fetched but never used below;
           # kept from the original sample.
           ori = body.joint_orientations
           # convert joint coordinates to color space
           joint_points_color = kinect.body_joints_to_color_space(joints)
           draw_body(joints, joint_points_color, pygame.color.THECOLORS["red"])

Similarly to the C++ library, two external functions are used to display the skeleton

def draw_body_bone(joints, jointColorPoints, color, joint0, joint1):
    """Draw one bone (joint0 -> joint1) onto the global 'frame_surface'.

    joints           -- body.joints array from PyKinect2
    jointColorPoints -- same joints mapped to color space (body_joints_to_color_space)
    color            -- pygame color for the line
    joint0, joint1   -- PyKinectV2.JointType_* indices of the bone's endpoints
    """
    joint0State = joints[joint0].TrackingState
    joint1State = joints[joint1].TrackingState

    # skip if either joint is not tracked at all
    if (joint0State == PyKinectV2.TrackingState_NotTracked) or (joint1State == PyKinectV2.TrackingState_NotTracked):
        return

    # skip if both joints are merely inferred (neither is *really* tracked)
    if (joint0State == PyKinectV2.TrackingState_Inferred) and (joint1State == PyKinectV2.TrackingState_Inferred):
        return

    # at least one joint is good
    start = (jointColorPoints[joint0].x, jointColorPoints[joint0].y)
    end = (jointColorPoints[joint1].x, jointColorPoints[joint1].y)
    try:
        pygame.draw.line(frame_surface, color, start, end, 8)
    except Exception:  # mapped positions can be invalid (inf); best-effort drawing
        # fixed: the original bare 'except:' also swallowed KeyboardInterrupt/SystemExit
        pass
 
def draw_body(joints, jointColorPoints, color):
    """Draw the full Kinect v2 skeleton by connecting adjacent joints.

    Replaces the original 24 copy-pasted draw_body_bone calls (which also
    carried C-style trailing semicolons) with a bone table, drawn in the
    same order.
    """
    bones = [
        # Torso
        (PyKinectV2.JointType_Head, PyKinectV2.JointType_Neck),
        (PyKinectV2.JointType_Neck, PyKinectV2.JointType_SpineShoulder),
        (PyKinectV2.JointType_SpineShoulder, PyKinectV2.JointType_SpineMid),
        (PyKinectV2.JointType_SpineMid, PyKinectV2.JointType_SpineBase),
        (PyKinectV2.JointType_SpineShoulder, PyKinectV2.JointType_ShoulderRight),
        (PyKinectV2.JointType_SpineShoulder, PyKinectV2.JointType_ShoulderLeft),
        (PyKinectV2.JointType_SpineBase, PyKinectV2.JointType_HipRight),
        (PyKinectV2.JointType_SpineBase, PyKinectV2.JointType_HipLeft),
        # Right Arm
        (PyKinectV2.JointType_ShoulderRight, PyKinectV2.JointType_ElbowRight),
        (PyKinectV2.JointType_ElbowRight, PyKinectV2.JointType_WristRight),
        (PyKinectV2.JointType_WristRight, PyKinectV2.JointType_HandRight),
        (PyKinectV2.JointType_HandRight, PyKinectV2.JointType_HandTipRight),
        (PyKinectV2.JointType_WristRight, PyKinectV2.JointType_ThumbRight),
        # Left Arm
        (PyKinectV2.JointType_ShoulderLeft, PyKinectV2.JointType_ElbowLeft),
        (PyKinectV2.JointType_ElbowLeft, PyKinectV2.JointType_WristLeft),
        (PyKinectV2.JointType_WristLeft, PyKinectV2.JointType_HandLeft),
        (PyKinectV2.JointType_HandLeft, PyKinectV2.JointType_HandTipLeft),
        (PyKinectV2.JointType_WristLeft, PyKinectV2.JointType_ThumbLeft),
        # Right Leg
        (PyKinectV2.JointType_HipRight, PyKinectV2.JointType_KneeRight),
        (PyKinectV2.JointType_KneeRight, PyKinectV2.JointType_AnkleRight),
        (PyKinectV2.JointType_AnkleRight, PyKinectV2.JointType_FootRight),
        # Left Leg
        (PyKinectV2.JointType_HipLeft, PyKinectV2.JointType_KneeLeft),
        (PyKinectV2.JointType_KneeLeft, PyKinectV2.JointType_AnkleLeft),
        (PyKinectV2.JointType_AnkleLeft, PyKinectV2.JointType_FootLeft),
    ]
    for joint0, joint1 in bones:
        draw_body_bone(joints, jointColorPoints, color, joint0, joint1)
  • sensors/kinect_library.1488388128.txt.gz
  • Last modified: 2019/04/25 14:08
  • (external edit)