/* --------------------------------------------------------------------------
* SimpleOpenNI DepthImage Test
* --------------------------------------------------------------------------
* Processing Wrapper for the OpenNI/Kinect library
* http://code.google.com/p/simple-openni
* --------------------------------------------------------------------------
* prog: Max Rheiner / Interaction Design / zhdk / http://iad.zhdk.ch/
* date: 02/16/2011 (m/d/y)
* ----------------------------------------------------------------------------
*/
import SimpleOpenNI.*;
SimpleOpenNI context;
void setup()
{
  context = new SimpleOpenNI(this);

  // mirror the camera image (flips it horizontally, like a mirror)
  context.setMirror(true);

  // enable depthMap generation
  if(context.enableDepth() == false)
  {
    println("Can't open the depthMap, maybe the camera is not connected!");
    exit();
    return;
  }

  // enable RGB generation (alternative resolutions left commented out)
  //context.enableRGB(640,480,30);
  //context.enableRGB(1280,1024,15);
  if(context.enableRGB() == false)
  {
    println("Can't open the rgbMap, maybe the camera is not connected or there is no rgbSensor!");
    exit();
    return;
  }

  // window wide enough for both images side by side, plus a 10px gap
  size(context.depthWidth() + context.rgbWidth() + 10, context.rgbHeight());
}
void draw()
{
  // read the next frame from the camera
  context.update();

  background(200,0,0);

  // draw the depth image on the left
  image(context.depthImage(),0,0);

  // draw the RGB image to the right of the depth image
  image(context.rgbImage(),context.depthWidth() + 10,0);
}
I know you said the code is for Processing, but I have a book that said to add the samples to the library, click on the depth map, and boom, I should have a screenshot of myself, but I didn't. I didn't get it through Arduino, I got it through OpenNI, though I am a little lost. I don't want to move on to the next step if I don't fully understand this one.
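In case it helps to narrow things down, here is a minimal depth-only sketch, a stripped-down variant of the example above that uses only the SimpleOpenNI calls already shown (nothing new is assumed). It skips the RGB sensor entirely, so it isolates the Kinect and the depth map:

import SimpleOpenNI.*;

SimpleOpenNI context;

void setup()
{
  context = new SimpleOpenNI(this);

  // only the depth map is needed for this test
  if(context.enableDepth() == false)
  {
    println("Can't open the depthMap, maybe the camera is not connected!");
    exit();
    return;
  }

  // window sized to the depth image alone
  size(context.depthWidth(), context.depthHeight());
}

void draw()
{
  // read the next frame from the camera
  context.update();

  // draw the grayscale depth image
  image(context.depthImage(), 0, 0);
}

If this shows a grayscale image of you, the camera and SimpleOpenNI are set up correctly and the issue is in the RGB part of the full example; if it prints the error message instead, the Kinect is not being detected at all.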