
Capturing video in iOS with MonoTouch

I have code that creates, configures, and starts a video capture session at runtime in Objective-C, and it runs without any problems. I ported the sample to C# with MonoTouch 4.0.3 and hit a couple of issues. Here is the code:

void Initialize ()
{
    // Create notifier delegate class
    captureVideoDelegate = new CaptureVideoDelegate (this);

    // Create capture session
    captureSession = new AVCaptureSession ();
    captureSession.SessionPreset = AVCaptureSession.Preset640x480;

    // Create capture device
    captureDevice = AVCaptureDevice.DefaultDeviceWithMediaType (AVMediaType.Video);

    // Create capture device input
    NSError error;
    captureDeviceInput = new AVCaptureDeviceInput (captureDevice, out error);
    captureSession.AddInput (captureDeviceInput);

    // Create capture device output
    captureVideoOutput = new AVCaptureVideoDataOutput ();
    captureSession.AddOutput (captureVideoOutput);
    captureVideoOutput.VideoSettings.PixelFormat = CVPixelFormatType.CV32BGRA;
    captureVideoOutput.MinFrameDuration = new CMTime (1, 30);
    //
    // ISSUE 1
    // In the original Objective-C code I was creating a dispatch_queue_t object and passing it
    // to the setSampleBufferDelegate:queue: message, and it worked. Here I could not find an
    // equivalent of the queue mechanism. (Also not sure whether the delegate should be used like this.)
    //
    captureVideoOutput.SetSampleBufferDelegatequeue (captureVideoDelegate, ???????);

    // Create preview layer
    previewLayer = AVCaptureVideoPreviewLayer.FromSession (captureSession);
    previewLayer.Orientation = AVCaptureVideoOrientation.LandscapeRight;
    //
    // ISSUE 2:
    // Didn't find any VideoGravity-related enumeration in MonoTouch (not sure whether a string will work)
    //
    previewLayer.VideoGravity = "AVLayerVideoGravityResizeAspectFill";
    previewLayer.Frame = new RectangleF (0, 0, 1024, 768);
    this.View.Layer.AddSublayer (previewLayer);

    // Start capture session
    captureSession.StartRunning ();
}


public class CaptureVideoDelegate : AVCaptureVideoDataOutputSampleBufferDelegate
{
    private VirtualDeckViewController mainViewController;

    public CaptureVideoDelegate (VirtualDeckViewController viewController)
    {
        mainViewController = viewController;
    }

    public override void DidOutputSampleBuffer (AVCaptureOutput captureOutput, CMSampleBuffer sampleBuffer, AVCaptureConnection connection)
    {
        // TODO: Implement - see: http://go-mono.com/docs/index.aspx?link=T%3aMonoTouch.Foundation.ModelAttribute
    }
}

Issue 1: I'm not sure how to correctly use the delegate with the SetSampleBufferDelegatequeue method, and I haven't found an equivalent of the dispatch_queue_t object, which works fine in Objective-C, to pass as the second parameter.

Issue 2: I couldn't find any VideoGravity enumeration in the MonoTouch libraries, and I don't know whether passing a string with the constant's value will work.

I have looked for clues to solve this, but there are no clear samples around. Any example of, or information on, how to do the same in MonoTouch would be greatly appreciated.

Thanks a lot.

Answers


Everything is finally working fine. The freezing happened because, in my tests, I was not disposing of the sampleBuffer in the DidOutputSampleBuffer method. The final code for my view looks like this:

UPDATE 1: Changed the assignment of the VideoSettings CVPixelFormat; it was incorrect and resulted in a wrong BytesPerPixel in the sampleBuffer.

public partial class VirtualDeckViewController : UIViewController
{
    public CaptureVideoDelegate captureVideoDelegate;

    public AVCaptureVideoPreviewLayer previewLayer;
    public AVCaptureSession captureSession;
    public AVCaptureDevice captureDevice;
    public AVCaptureDeviceInput captureDeviceInput;
    public AVCaptureVideoDataOutput captureVideoOutput;

    ...

    public override void ViewDidLoad ()
    {
        base.ViewDidLoad ();

        SetupVideoCaptureSession ();
    }

    public void SetupVideoCaptureSession ()
    {
        // Create notifier delegate class
        captureVideoDelegate = new CaptureVideoDelegate ();

        // Create capture session
        captureSession = new AVCaptureSession ();
        captureSession.BeginConfiguration ();
        captureSession.SessionPreset = AVCaptureSession.Preset640x480;

        // Create capture device
        captureDevice = AVCaptureDevice.DefaultDeviceWithMediaType (AVMediaType.Video);

        // Create capture device input
        NSError error;
        captureDeviceInput = new AVCaptureDeviceInput (captureDevice, out error);
        captureSession.AddInput (captureDeviceInput);

        // Create capture device output
        captureVideoOutput = new AVCaptureVideoDataOutput ();
        captureVideoOutput.AlwaysDiscardsLateVideoFrames = true;
        // UPDATE: wrong VideoSettings assignment
        //captureVideoOutput.VideoSettings.PixelFormat = CVPixelFormatType.CV32BGRA;
        // UPDATE: correct VideoSettings assignment
        captureVideoOutput.VideoSettings = new AVVideoSettings (CVPixelFormatType.CV32BGRA);
        captureVideoOutput.MinFrameDuration = new CMTime (1, 30);
        DispatchQueue dispatchQueue = new DispatchQueue ("VideoCaptureQueue");
        captureVideoOutput.SetSampleBufferDelegateAndQueue (captureVideoDelegate, dispatchQueue);
        captureSession.AddOutput (captureVideoOutput);

        // Create preview layer
        previewLayer = AVCaptureVideoPreviewLayer.FromSession (captureSession);
        previewLayer.Orientation = AVCaptureVideoOrientation.LandscapeLeft;
        previewLayer.VideoGravity = "AVLayerVideoGravityResizeAspectFill";
        previewLayer.Frame = new RectangleF (0, 0, 1024, 768);
        this.View.Layer.AddSublayer (previewLayer);

        // Commit configuration and start capture session
        captureSession.CommitConfiguration ();
        captureSession.StartRunning ();
    }

    public class CaptureVideoDelegate : AVCaptureVideoDataOutputSampleBufferDelegate
    {
        public CaptureVideoDelegate () : base ()
        {
        }

        public override void DidOutputSampleBuffer (AVCaptureOutput captureOutput, CMSampleBuffer sampleBuffer, AVCaptureConnection connection)
        {
            // TODO: Implement buffer processing

            // Very important: the buffer needs to be disposed or the preview will freeze
            sampleBuffer.Dispose ();
        }
    }
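One note on that Dispose call (my addition, not part of the original answer): since CMSampleBuffer is IDisposable in MonoTouch, the same guarantee can be written as a using block, which releases the buffer even when frame processing throws:

    public override void DidOutputSampleBuffer (AVCaptureOutput captureOutput, CMSampleBuffer sampleBuffer, AVCaptureConnection connection)
    {
        using (sampleBuffer)
        {
            // Process the frame here; the buffer is released when the block exits,
            // so AVFoundation does not run out of capture buffers and freeze the preview.
        }
    }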

The final piece of the puzzle was answered by a sample from Miguel de Icaza that I finally found here: link

Thanks to Miguel and Pavel.


Here is my code, feel free to use it. I have just removed the important stuff; all of the initialization is there, as well as the reading of the output sample buffer.

From there I have code that passes the CVImageBuffer to a linked custom ObjC library for processing. If you need to process the frames in MonoTouch, you have to go the extra mile and convert them to a CGImage or UIImage. There is no function for that in MonoTouch (AFAIK), so you have to bind one yourself from plain ObjC. A sample in ObjC is here: how to convert a CVImageBufferRef to UIImage

public void InitCapture ()
{
    try
    {
        // Setup the input
        NSError error;
        captureInput = new AVCaptureDeviceInput (AVCaptureDevice.DefaultDeviceWithMediaType (AVMediaType.Video), out error);

        // Setup the output
        captureOutput = new AVCaptureVideoDataOutput ();
        captureOutput.AlwaysDiscardsLateVideoFrames = true;
        captureOutput.SetSampleBufferDelegateAndQueue (avBufferDelegate, dispatchQueue);
        captureOutput.MinFrameDuration = new CMTime (1, 10);

        // Set the video output to store frames in BGRA (compatible across devices)
        captureOutput.VideoSettings = new AVVideoSettings (CVPixelFormatType.CV32BGRA);

        // Create a capture session
        captureSession = new AVCaptureSession ();
        captureSession.SessionPreset = AVCaptureSession.PresetMedium;
        captureSession.AddInput (captureInput);
        captureSession.AddOutput (captureOutput);

        // Setup the preview layer
        prevLayer = new AVCaptureVideoPreviewLayer (captureSession);
        prevLayer.Frame = liveView.Bounds;
        prevLayer.VideoGravity = "AVLayerVideoGravityResize"; // image may be slightly distorted, but the red bar position will be accurate

        liveView.Layer.AddSublayer (prevLayer);

        StartLiveDecoding ();
    }
    catch (Exception ex)
    {
        Console.WriteLine (ex.ToString ());
    }
}

public void DidOutputSampleBuffer (AVCaptureOutput captureOutput, MonoTouch.CoreMedia.CMSampleBuffer sampleBuffer, AVCaptureConnection connection)
{
    Console.WriteLine ("DidOutputSampleBuffer: enter");

    if (isScanning)
    {
        CVImageBuffer imageBuffer = sampleBuffer.GetImageBuffer ();

        Console.WriteLine ("DidOutputSampleBuffer: calling decode");

        // NSLog(@"got image w=%d h=%d bpr=%d", CVPixelBufferGetWidth(imageBuffer), CVPixelBufferGetHeight(imageBuffer), CVPixelBufferGetBytesPerRow(imageBuffer));
        // call the decoder
        DecodeImage (imageBuffer);
    }
    else
    {
        Console.WriteLine ("DidOutputSampleBuffer: not scanning");
    }

    Console.WriteLine ("DidOutputSampleBuffer: quit");
}
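As noted at the top of this answer, MonoTouch itself ships no CVImageBuffer-to-UIImage helper, but the conversion in the linked ObjC sample can also be transliterated into managed code with the CoreVideo and CoreGraphics bindings, so a custom binding is not strictly required. A minimal sketch, assuming the output is configured for CV32BGRA as above (ImageFromSampleBuffer is my own name, and the red/blue channel order may need adjusting for your device):

    UIImage ImageFromSampleBuffer (CMSampleBuffer sampleBuffer)
    {
        // Get the pixel buffer behind the sample buffer and lock it while we read its memory
        using (CVPixelBuffer pixelBuffer = sampleBuffer.GetImageBuffer () as CVPixelBuffer)
        {
            pixelBuffer.Lock (CVOptionFlags.None);
            try
            {
                // Wrap the BGRA pixel data in a CGBitmapContext and copy it out as a CGImage
                using (CGColorSpace colorSpace = CGColorSpace.CreateDeviceRGB ())
                using (CGBitmapContext context = new CGBitmapContext (
                           pixelBuffer.BaseAddress,
                           pixelBuffer.Width, pixelBuffer.Height, 8,
                           pixelBuffer.BytesPerRow, colorSpace,
                           CGImageAlphaInfo.PremultipliedFirst))
                using (CGImage cgImage = context.ToImage ())
                {
                    return UIImage.FromImage (cgImage);
                }
            }
            finally
            {
                pixelBuffer.Unlock (CVOptionFlags.None);
            }
        }
    }

The caller is still responsible for disposing the CMSampleBuffer itself, as the first answer points out.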

The StartLiveDecoding function at the end of InitCapture doesn't do much; it just starts the video capture by calling captureSession.StartRunning(). – 2011-05-10 17:22:30
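In other words, that method presumably boils down to something like this (a sketch inferred from the comment; the isScanning flag is an assumption based on the DidOutputSampleBuffer code above):

    public void StartLiveDecoding ()
    {
        isScanning = true;              // assumed: enables the decode branch in DidOutputSampleBuffer
        captureSession.StartRunning (); // start video capture
    }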


Thanks, that means there is a supported solution in MonoTouch. The answer to issue #2 is in there, but I still can't tell how your dispatchQueue is created. I guess avBufferDelegate is an instance of a class derived from the delegate class, so the remaining question is about the dispatchQueue. Thanks a lot Pavel; the buffer conversion is not a problem. – 2011-05-10 21:53:37
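For what it's worth, the queue asked about here comes from MonoTouch.CoreFoundation, the same mechanism the accepted answer above ended up using (a sketch with the field names of this answer; the queue label is arbitrary):

    using MonoTouch.CoreFoundation;
    ...
    // Create a serial GCD queue for the sample-buffer callbacks and attach the delegate to it
    dispatchQueue = new DispatchQueue ("myVideoQueue");
    captureOutput.SetSampleBufferDelegateAndQueue (avBufferDelegate, dispatchQueue);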


I managed to get the capture session working; one of the problems was that I was trying to use SetSampleBufferDelegatequeue instead of SetSampleBufferDelegateAndQueue (not sure what the difference is). But now I've run into a problem: the image in the preview freezes after a few frames, yet if I put a breakpoint in DidOutputSampleBuffer, the image in the preview layer keeps displaying fine while execution is stopped at that breakpoint. I suppose it has to do with the way I create the dispatch queue. Any clue on how to set up the dispatch queue correctly? Thanks for any help. – 2011-05-11 13:16:01