
I want to do something similar to what Johnny Lee did with his Wii head tracking (http://www.youtube.com/watch?v=Jd3-eiid-Uw&feature=player_embedded), but using the Kinect. Since Microsoft's SDK exposes the skeletal joints, I was hoping I could use that to get the head position. The problem is that I want to do this with my desktop computer and its monitor: if I place the Kinect sensor next to my monitor and sit at my desk, pretty much only my head and neck are visible to the sensor, so skeletal tracking never picks up my head position.

Is anyone familiar with any head-tracking projects that use the Kinect? Preferably in C#.

Answers


The official Kinect for Windows SDK has some limits, in line with the guidance given for the Xbox and the XDK: you need to be between 1.2 m and 3.5 m from the sensor for it to work. These limits are actually reduced in alternative SDKs such as the OpenNI/NITE libraries, which let you detect skeletons/objects much closer to the sensor.
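For completeness: later releases of the official SDK (1.5 and up) also added a near depth range and a seated skeleton-tracking mode, which help in exactly the desk scenario described in the question. A minimal sketch, assuming Kinect for Windows hardware, SDK 1.5+, and a started KinectSensor named sensor (the variable name is an assumption):

// Assumes Kinect for Windows hardware and SDK 1.5 or later.
sensor.DepthStream.Range = DepthRange.Near;                       // depth from roughly 0.4 m
sensor.SkeletonStream.EnableTrackingInNearRange = true;           // skeletons in near range
sensor.SkeletonStream.TrackingMode = SkeletonTrackingMode.Seated; // upper-body joints only
sensor.SkeletonStream.Enable();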

Another problem you will have with skeletal input is that it only detects the head's position as part of the skeleton; it will not tell you if you rotate your head to the side. To achieve that you would instead use the raw depth stream and some smarts around object recognition, which is a bit more involved.

In the past I have used this commercial .NET API, which uses a webcam to track head movements and achieves what you are after: http://luxand.com/facesdk/index2.php


For this kind of application I don't think you can use the skeleton tracking offered by any of the frameworks, whether Microsoft's SDK or OpenNI.

I would suggest segmenting the user's head by applying a depth threshold to the raw depth data. This should give you a reduction of the background. I think there are already methods that do this.

As a second step you would want something like an axis inside the segmented user. The simplest approach would be to use OpenCV's fitEllipse: the major axis of the returned ellipse, combined with the depth information, gives you this axis.

This approach works best when most of the segmented points belong to the user's head. If you are farther away you have to think about a way to segment only the head. The ellipse fitting itself should always work.
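A minimal sketch of the two steps, assuming a raw 640x480 depth frame in millimetres. Instead of calling OpenCV's fitEllipse, the major-axis orientation is computed directly from the blob's second-order moments, which yields the same axis; the helper name and the threshold values are assumptions:

// Hypothetical sketch: segment by depth band, then estimate the orientation
// of the blob's major axis from central second-order moments (this is the
// same major axis an ellipse fit such as fitEllipse would return).
static double FindHeadAxisAngle(short[] depthMm, int width, int height,
                                short nearMm, short farMm)
{
    double sumX = 0, sumY = 0;
    long n = 0;

    // First pass: centroid of all pixels inside the depth band.
    for (int y = 0; y < height; y++)
        for (int x = 0; x < width; x++)
        {
            short d = depthMm[y * width + x];
            if (d >= nearMm && d <= farMm) { sumX += x; sumY += y; n++; }
        }
    if (n == 0) return double.NaN; // nothing segmented

    double cx = sumX / n, cy = sumY / n;

    // Second pass: central second-order moments of the segmented blob.
    double mxx = 0, myy = 0, mxy = 0;
    for (int y = 0; y < height; y++)
        for (int x = 0; x < width; x++)
        {
            short d = depthMm[y * width + x];
            if (d < nearMm || d > farMm) continue;
            double dx = x - cx, dy = y - cy;
            mxx += dx * dx; myy += dy * dy; mxy += dx * dy;
        }

    // Orientation of the major axis in radians, as for a fitted ellipse.
    return 0.5 * Math.Atan2(2 * mxy, mxx - myy);
}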


You don't need a Kinect to track your head position. You can do the same thing with a regular camera and OpenCV by using face tracking.

A simple example is shown here: http://vimeo.com/19464641

In that video I am using OpenCV to track my face (you can barely see it in the corner, but the red dot indicates my face position).


Also, OpenCV can be accessed from C# via the Emgu project (http://www.emgu.com/wiki/index.php/Main_Page). There is even a KinectCapture you can use to grab video frames and do face detection easily (http://www.emgu.com/wiki/files/2.4.2/document/html/a8b9f573-4c88-fdc0-a3b1-189096e54caa.htm).
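A minimal face-detection sketch against the Emgu CV wrapper (assuming Emgu CV 3.x; the cascade file name and camera index are assumptions about the local setup):

using System.Drawing;
using Emgu.CV;
using Emgu.CV.CvEnum;
using Emgu.CV.Structure;

class FaceTrackDemo
{
    static void Main()
    {
        // Webcam index 0 and the standard OpenCV frontal-face cascade.
        using (var capture = new VideoCapture(0))
        using (var cascade = new CascadeClassifier("haarcascade_frontalface_default.xml"))
        using (var frame = new Mat())
        using (var gray = new Mat())
        {
            while (capture.Read(frame) && !frame.IsEmpty)
            {
                CvInvoke.CvtColor(frame, gray, ColorConversion.Bgr2Gray);

                // Detect faces; each hit is a bounding rectangle.
                Rectangle[] faces = cascade.DetectMultiScale(gray, 1.1, 4);
                foreach (Rectangle face in faces)
                    CvInvoke.Rectangle(frame, face, new MCvScalar(0, 0, 255), 2);

                CvInvoke.Imshow("Face tracking", frame);
                if (CvInvoke.WaitKey(30) >= 0) break; // any key quits
            }
        }
    }
}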


I would suggest using AForge.NET, either together with the Microsoft XNA Framework or on its own. You will still have to do some development yourself, though. I am working on something similar, also in C#. I don't think you will find a complete out-of-the-box example; nobody has done that yet (correct me if I'm wrong). A capture skeleton to start from is sketched below.
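A minimal AForge.NET capture skeleton, assuming the AForge.Video and AForge.Video.DirectShow assemblies are referenced; the actual head tracking would go inside the NewFrame handler:

using System;
using System.Drawing;
using AForge.Video;
using AForge.Video.DirectShow;

class AForgeCaptureDemo
{
    static void Main()
    {
        // Pick the first video input device on the machine.
        var devices = new FilterInfoCollection(FilterCategory.VideoInputDevice);
        var camera = new VideoCaptureDevice(devices[0].MonikerString);

        camera.NewFrame += (sender, eventArgs) =>
        {
            // Clone the frame; AForge reuses the buffer after the event returns.
            using (Bitmap frame = (Bitmap)eventArgs.Frame.Clone())
            {
                // Head/face tracking on 'frame' would go here.
                Console.WriteLine("Got frame {0}x{1}", frame.Width, frame.Height);
            }
        };

        camera.Start();
        Console.ReadKey(); // run until a key is pressed
        camera.SignalToStop();
        camera.WaitForStop();
    }
}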


Check out the Channel 9 tutorials on topics like this; the Skeletal Fundamentals video is the one you want. But if you want to save time, here is some code.
XAML

<Window x:Class="SkeletalTracking.MainWindow" 
    xmlns="http://schemas.microsoft.com/winfx/2006/xaml/presentation" 
    xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml" 
    Title="MainWindow" Height="600" Width="800" Loaded="Window_Loaded" 
    xmlns:my="clr-namespace:Microsoft.Samples.Kinect.WpfViewers;assembly=Microsoft.Samples.Kinect.WpfViewers" 
    Closing="Window_Closing" WindowState="Maximized">  
<Canvas Name="MainCanvas"> 
    <my:KinectColorViewer Canvas.Left="0" Canvas.Top="0" Width="640" Height="480" Name="kinectColorViewer1" 
          Kinect="{Binding ElementName=kinectSensorChooser1, Path=Kinect}" /> 
    <my:KinectSensorChooser Canvas.Left="250" Canvas.Top="380" Name="kinectSensorChooser1" Width="328" /> 
    <Image Canvas.Left="66" Canvas.Top="90" Height="87" Name="headImage" Stretch="Fill" Width="84" Source="/SkeletalTracking;component/c4f-color.png" /> 
</Canvas>
</Window>

Code-behind

using System; 
using System.Collections.Generic; 
using System.Linq; 
using System.Text; 
using System.Windows; 
using System.Windows.Controls; 
using System.Windows.Data; 
using System.Windows.Documents; 
using System.Windows.Input; 
using System.Windows.Media; 
using System.Windows.Media.Imaging; 
using System.Windows.Navigation; 
using System.Windows.Shapes; 
using Microsoft.Kinect; 
using Coding4Fun.Kinect.Wpf; 

namespace SkeletalTracking 
{ 
/// <summary> 
/// Interaction logic for MainWindow.xaml 
/// </summary> 
public partial class MainWindow : Window 
{ 
    public MainWindow() 
    { 
     InitializeComponent(); 
    } 

    bool closing = false; 
    const int skeletonCount = 6; 
    Skeleton[] allSkeletons = new Skeleton[skeletonCount]; 

    private void Window_Loaded(object sender, RoutedEventArgs e) 
    { 
     kinectSensorChooser1.KinectSensorChanged += new DependencyPropertyChangedEventHandler(kinectSensorChooser1_KinectSensorChanged); 

    } 

    void kinectSensorChooser1_KinectSensorChanged(object sender, DependencyPropertyChangedEventArgs e) 
    { 
     KinectSensor old = (KinectSensor)e.OldValue; 

     StopKinect(old); 

     KinectSensor sensor = (KinectSensor)e.NewValue; 

     if (sensor == null) 
     { 
      return; 
     }

     //smoothing parameters for the skeleton stream...
     var parameters = new TransformSmoothParameters
     {
      Smoothing = 0.3f,
      Correction = 0.0f,
      Prediction = 0.0f,
      JitterRadius = 1.0f,
      MaxDeviationRadius = 0.5f
     };
     //...which can be applied by enabling the stream with them:
     //sensor.SkeletonStream.Enable(parameters);

     sensor.SkeletonStream.Enable();

     sensor.AllFramesReady += new EventHandler<AllFramesReadyEventArgs>(sensor_AllFramesReady); 
     sensor.DepthStream.Enable(DepthImageFormat.Resolution640x480Fps30); 
     sensor.ColorStream.Enable(ColorImageFormat.RgbResolution640x480Fps30); 

     try 
     { 
      sensor.Start(); 
     } 
     catch (System.IO.IOException) 
     { 
      kinectSensorChooser1.AppConflictOccurred(); 
     } 
    } 

    void sensor_AllFramesReady(object sender, AllFramesReadyEventArgs e) 
    { 
     if (closing) 
     { 
      return; 
     } 

     //Get a skeleton 
     Skeleton first = GetFirstSkeleton(e); 

     if (first == null) 
     { 
      return; 
      }

     //set scaled position 
     ScalePosition(headImage, first.Joints[JointType.Head]); 
     //ScalePosition(leftEllipse, first.Joints[JointType.HandLeft]); 
     //ScalePosition(rightEllipse, first.Joints[JointType.HandRight]); 

     GetCameraPoint(first, e); 

    } 

    void GetCameraPoint(Skeleton first, AllFramesReadyEventArgs e) 
    { 

     using (DepthImageFrame depth = e.OpenDepthImageFrame()) 
     { 
      if (depth == null || 
       kinectSensorChooser1.Kinect == null) 
      { 
       return; 
      } 


      //Map a joint location to a point on the depth map 
      //head 
      DepthImagePoint headDepthPoint = 
       depth.MapFromSkeletonPoint(first.Joints[JointType.Head].Position); 
      //left hand 
      DepthImagePoint leftDepthPoint = 
       depth.MapFromSkeletonPoint(first.Joints[JointType.HandLeft].Position); 
      //right hand 
      DepthImagePoint rightDepthPoint = 
       depth.MapFromSkeletonPoint(first.Joints[JointType.HandRight].Position); 


      //Map a depth point to a point on the color image 
      //head 
      ColorImagePoint headColorPoint = 
       depth.MapToColorImagePoint(headDepthPoint.X, headDepthPoint.Y, 
       ColorImageFormat.RgbResolution640x480Fps30); 
      //left hand 
      ColorImagePoint leftColorPoint = 
       depth.MapToColorImagePoint(leftDepthPoint.X, leftDepthPoint.Y, 
       ColorImageFormat.RgbResolution640x480Fps30); 
      //right hand 
      ColorImagePoint rightColorPoint = 
       depth.MapToColorImagePoint(rightDepthPoint.X, rightDepthPoint.Y, 
       ColorImageFormat.RgbResolution640x480Fps30); 


      //Set location 
      CameraPosition(headImage, headColorPoint); 
      //CameraPosition(leftEllipse, leftColorPoint); 
      //CameraPosition(rightEllipse, rightColorPoint); 
     }   
    } 


    Skeleton GetFirstSkeleton(AllFramesReadyEventArgs e) 
    { 
     using (SkeletonFrame skeletonFrameData = e.OpenSkeletonFrame()) 
     { 
      if (skeletonFrameData == null) 
      { 
       return null; 
      } 


      skeletonFrameData.CopySkeletonDataTo(allSkeletons); 

      //get the first tracked skeleton 
      Skeleton first = (from s in allSkeletons 
            where s.TrackingState == SkeletonTrackingState.Tracked 
            select s).FirstOrDefault(); 

      return first; 

     } 
    } 

    private void StopKinect(KinectSensor sensor) 
    { 
     if (sensor != null) 
     { 
      if (sensor.IsRunning) 
      { 
       //stop sensor 
       sensor.Stop(); 

       //stop audio if not null 
       if (sensor.AudioSource != null) 
       { 
        sensor.AudioSource.Stop(); 
       } 


      } 
     } 
    } 

    private void CameraPosition(FrameworkElement element, ColorImagePoint point) 
    { 
     //Divide by 2 for width and height so point is right in the middle 
     // instead of in top/left corner 
     Canvas.SetLeft(element, point.X - element.Width/2); 
     Canvas.SetTop(element, point.Y - element.Height/2); 

    } 

    private void ScalePosition(FrameworkElement element, Joint joint)
    {
     //convert the joint's skeleton-space value to screen X/Y
     //(ScaleTo comes from Coding4Fun.Kinect.Wpf)
     Joint scaledJoint = joint.ScaleTo(1280, 720);

     //to scale and clamp the joint range instead, use the overload:
     //Joint scaledJoint = joint.ScaleTo(1280, 720, .3f, .3f);
     //(.3f means 1/3 of the joint's travel covers the full screen)

     Canvas.SetLeft(element, scaledJoint.Position.X);
     Canvas.SetTop(element, scaledJoint.Position.Y);
    }


    private void Window_Closing(object sender, System.ComponentModel.CancelEventArgs e) 
    { 
     closing = true; 
     StopKinect(kinectSensorChooser1.Kinect); 
    } 



    } 
} 

Personally I recommend watching the videos, because they explain everything. Good luck with your project!