
Answer


I don't think you can configure which depth range gets recorded, but what you can do is process the information you read back from the .oni recording:

import SimpleOpenNI.*; 

SimpleOpenNI context; 
boolean recordFlag = true;   // true = record to .oni, false = play back the .oni and export .ply frames
boolean saving = false; 
int frames = 0;              // number of frames recorded
int savedFrames = 0;         // number of frames exported so far
// change these two values as you wish (depth range in mm):
float minZ = 100; 
float maxZ = 500; 

void setup(){ 
  context = new SimpleOpenNI(this); 

  if(!recordFlag){ 
    // playback: open an existing recording
    if(!context.openFileRecording("test.oni")){ 
      println("can't find recording !!!!"); 
      exit(); 
    } 
    context.enableDepth(); 
  }else{ 
    // recording 
    context.enableDepth(); 
    // setup the recording 
    context.enableRecorder(SimpleOpenNI.RECORD_MEDIUM_FILE, "test.oni"); 
    // select the recording channels 
    context.addNodeToRecording(SimpleOpenNI.NODE_DEPTH, SimpleOpenNI.CODEC_16Z_EMB_TABLES); 
  } 
  // set window size 
  if((context.nodes() & SimpleOpenNI.NODE_DEPTH) != 0) 
    size(context.depthWidth(), context.depthHeight()); 
  else 
    exit(); 
} 

void draw(){ 
  background(0); 
  context.update(); 
  if((context.nodes() & SimpleOpenNI.NODE_DEPTH) != 0) image(context.depthImage(), 0, 0); 
  if(recordFlag) frames++; 
  if(saving && savedFrames < frames){ 
    delay(3000); // hack: give the .oni playback time to advance to the next frame
    int i = savedFrames; 
    int w = context.depthWidth(); 
    int h = context.depthHeight(); 
    // collect the points inside the depth range first, so the PLY header
    // can declare the actual vertex count instead of w*h
    PVector[] realWorld = context.depthMapRealWorld(); // one 3D point per depth pixel, in mm
    StringBuilder points = new StringBuilder(); 
    int numPoints = 0; 
    int index; 
    PVector realWorldPoint; 
    for(int y = 0; y < h; y++){ 
      for(int x = 0; x < w; x++){ 
        index = x + y * w; 
        realWorldPoint = realWorld[index]; 
        if(realWorldPoint.z > minZ && realWorldPoint.z < maxZ){ 
          points.append(realWorldPoint.x + " " + realWorldPoint.y + " " + realWorldPoint.z + "\n"); 
          numPoints++; 
        } 
      } 
    } 
    // write the ASCII PLY file for this frame
    PrintWriter output = createWriter(dataPath("frame_" + i + ".ply")); 
    output.println("ply"); 
    output.println("format ascii 1.0"); 
    output.println("element vertex " + numPoints); 
    output.println("property float x"); 
    output.println("property float y"); 
    output.println("property float z"); 
    output.println("end_header"); 
    output.print(points.toString()); 
    output.flush(); 
    output.close(); 
    rect(random(width), random(height), 100, 100); // visual feedback that a frame was written
    println("saved " + (i+1) + " of " + frames); 
    savedFrames++; 
  } 
} 

void keyPressed(){ 
  if(key == ' '){ 
    if(recordFlag){ 
      // store how many frames were recorded so the playback run knows how many to export
      saveStrings(dataPath("frames.txt"), split(frames + " ", ' ')); 
      exit(); 
    }else saveONIToPLY(); 
  } 
} 

void saveONIToPLY(){ 
  frames = int(loadStrings(dataPath("frames.txt"))[0]); 
  saving = true; 
  println("saving " + frames + " frames"); 
} 
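
To use the sketch above: run it once with recordFlag set to true and press space when you are done recording (this writes frames.txt and exits); then set recordFlag to false, run again and press space to export one .ply file per recorded frame. As a quick sanity check (not part of the original answer, and assuming an export run has already written a file such as frame_0.ply into the sketch's data folder), a small helper like the following could compare the vertex count declared in the PLY header with the number of point lines actually written:

// Hypothetical helper, a minimal sketch: load one exported PLY frame and
// compare the declared vertex count with the number of point lines.
void checkPLY(String filename){
  String[] lines = loadStrings(dataPath(filename));
  int declared = 0;
  int headerEnd = 0;
  // scan the header for the vertex count and find where it ends
  for(int i = 0; i < lines.length; i++){
    String line = lines[i].trim();
    if(line.startsWith("element vertex")) declared = int(split(line, ' ')[2]);
    if(line.equals("end_header")){
      headerEnd = i;
      break;
    }
  }
  // count non-empty point lines after the header
  int points = 0;
  for(int i = headerEnd + 1; i < lines.length; i++){
    if(lines[i].trim().length() > 0) points++;
  }
  println(filename + ": header declares " + declared + " vertices, file has " + points + " points");
}

Calling checkPLY("frame_0.ply") from setup() after an export run should report matching counts; if the two numbers differ, most PLY viewers will either refuse the file or read garbage points.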

I think you're right. I found an openFrameworks codebase that lets me take the ONI file and export it at a certain z depth, but this code is great for visualizing it. Thanks! – mheavers 2012-07-24 16:18:35


Cool, good that you can use openFrameworks, it's much faster for Kinect stuff. For more advanced point cloud processing you could also have a look at the [Point Clouds Library](http://pointclouds.org/), but keep in mind it isn't very straightforward to set up (it has a lot of dependencies). Your project sounds interesting, it would be cool to see it online at some point :) Good luck! – 2012-07-24 16:24:03


Sure, I'll send you some of what I'm working on as soon as I can. – mheavers 2012-07-25 00:05:40