
I want to capture a single frame from the Apple iSight camera built into a MacBook Pro, using Python (version 2.7 or 2.6) and PyObjC (version 2.2). How do I capture a frame from the Apple iSight using Python and PyObjC?

As a starting point, I used this old StackOverflow question. To verify that it made sense, I cross-referenced it against Apple's MyRecorder example, which it appears to be based on. Unfortunately, my script does not work.

My big questions are:

  • Am I initializing the camera correctly?
  • Am I starting the event loop correctly?
  • Is there any other setup I should be doing?

In the example script pasted below, the intended behavior is that after startImageCapture() is called, CaptureDelegate should start printing "Got a frame..." messages. However, the camera's indicator light never turns on, and the delegate's callback never executes.

Also, nothing fails inside startImageCapture(): every call claims success, and it does find the iSight device. Inspecting the session object in pdb shows that it has valid input and output objects, that the output has a delegate assigned, that the device is not in use by another process, and that the session is marked as running after startRunning() is called.
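For reference, one way to reproduce that inspection (a sketch only; inputs(), outputs(), isRunning(), and isInUseByAnotherApplication() are the PyObjC-bridged QTKit accessors) is to drop a breakpoint right after session.startRunning() and poke at the objects:

# at the end of startImageCapture(), after session.startRunning():
import pdb; pdb.set_trace()

(Pdb) p session.inputs(), session.outputs()
(Pdb) p dev.isInUseByAnotherApplication()
(Pdb) p session.isRunning()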

Here is the code:

#!/usr/bin/env python2.7

import sys
import os
import time
import objc
import QTKit
import AppKit
from Foundation import NSObject
from Foundation import NSTimer
from PyObjCTools import AppHelper
objc.setVerbose(True)

class CaptureDelegate(NSObject):
    def captureOutput_didOutputVideoFrame_withSampleBuffer_fromConnection_(self, captureOutput,
                                                                           videoFrame, sampleBuffer,
                                                                           connection):
        # This should get called for every captured frame
        print "Got a frame: %s" % videoFrame

class QuitClass(NSObject):
    def quitMainLoop_(self, aTimer):
        # Just stop the main loop.
        print "Quitting main loop."
        AppHelper.stopEventLoop()


def startImageCapture():
    error = None

    # Create a QT Capture session
    session = QTKit.QTCaptureSession.alloc().init()

    # Find iSight device and open it
    dev = QTKit.QTCaptureDevice.defaultInputDeviceWithMediaType_(QTKit.QTMediaTypeVideo)
    print "Device: %s" % dev
    if not dev.open_(error):
        print "Couldn't open capture device."
        return

    # Create an input instance with the device we found and add to session
    input = QTKit.QTCaptureDeviceInput.alloc().initWithDevice_(dev)
    if not session.addInput_error_(input, error):
        print "Couldn't add input device."
        return

    # Create an output instance with a delegate for callbacks and add to session
    output = QTKit.QTCaptureDecompressedVideoOutput.alloc().init()
    delegate = CaptureDelegate.alloc().init()
    output.setDelegate_(delegate)
    if not session.addOutput_error_(output, error):
        print "Failed to add output delegate."
        return

    # Start the capture
    print "Initiating capture..."
    session.startRunning()


def main():
    # Open camera and start capturing frames
    startImageCapture()

    # Setup a timer to quit in 10 seconds (hack for now)
    quitInst = QuitClass.alloc().init()
    NSTimer.scheduledTimerWithTimeInterval_target_selector_userInfo_repeats_(10.0,
                                                                             quitInst,
                                                                             'quitMainLoop:',
                                                                             None,
                                                                             False)
    # Start Cocoa's main event loop
    AppHelper.runConsoleEventLoop(installInterrupt=True)

    print "After event loop"


if __name__ == "__main__":
    main()

Thanks for any help you can provide!

Answers


Alright, I spent a day diving into the depths of PyObjC and got it working.

For the record: the code in the question did not work because of variable scoping and garbage collection. The session variable was deleted when it fell out of scope, which happened before the event processor ever ran. Something must be done to retain it so that it is not released before it has time to run.
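To illustrate the point, a minimal sketch of the most direct fix (untested in exactly this form): keep module-level references so the objects outlive the function call. The class-based approach below is what actually ended up working.

# Sketch: retain the capture objects at module level so they are not
# garbage-collected when startImageCapture() returns.
_session = None
_delegate = None

def startImageCapture():
    global _session, _delegate
    _session = QTKit.QTCaptureSession.alloc().init()
    _delegate = CaptureDelegate.alloc().init()
    # ... add the device input and the delegate's output exactly as in the question ...
    _session.startRunning()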

Moving everything into a class, and making the session a class variable, made the callbacks start working. In addition, the code below demonstrates how to get the frame's pixel data into a bitmap format and save it via Cocoa calls, and how to copy it back into Python's world-view as a buffer or a string.

The script below will capture a single frame:

#!/usr/bin/env python2.7 
# 
# camera.py -- by Trevor Bentley (02/04/2011) 
# 
# This work is licensed under a Creative Commons Attribution 3.0 Unported License. 
# 
# Run from the command line on an Apple laptop running OS X 10.6, this script will 
# take a single frame capture using the built-in iSight camera and save it to disk 
# using three methods. 
# 

import sys 
import os 
import time 
import objc 
import QTKit 
from AppKit import * 
from Foundation import NSObject 
from Foundation import NSTimer 
from PyObjCTools import AppHelper 

class NSImageTest(NSObject):
    def init(self):
        self = super(NSImageTest, self).init()
        if self is None:
            return None

        self.session = None
        self.running = True

        return self

    def captureOutput_didOutputVideoFrame_withSampleBuffer_fromConnection_(self, captureOutput,
                                                                           videoFrame, sampleBuffer,
                                                                           connection):
        self.session.stopRunning()  # I just want one frame

        # Get a bitmap representation of the frame using CoreImage and Cocoa calls
        ciimage = CIImage.imageWithCVImageBuffer_(videoFrame)
        rep = NSCIImageRep.imageRepWithCIImage_(ciimage)
        bitrep = NSBitmapImageRep.alloc().initWithCIImage_(ciimage)
        bitdata = bitrep.representationUsingType_properties_(NSBMPFileType, objc.NULL)

        # Save image to disk using Cocoa
        t0 = time.time()
        bitdata.writeToFile_atomically_("grab.bmp", False)
        t1 = time.time()
        print "Cocoa saved in %.5f seconds" % (t1 - t0)

        # Save a read-only buffer of image to disk using Python
        t0 = time.time()
        bitbuf = bitdata.bytes()
        f = open("python.bmp", "wb")  # binary mode, or the BMP data may be corrupted
        f.write(bitbuf)
        f.close()
        t1 = time.time()
        print "Python saved buffer in %.5f seconds" % (t1 - t0)

        # Save a string-copy of the buffer to disk using Python
        t0 = time.time()
        bitbufstr = str(bitbuf)
        f = open("python2.bmp", "wb")  # binary mode here as well
        f.write(bitbufstr)
        f.close()
        t1 = time.time()
        print "Python saved string in %.5f seconds" % (t1 - t0)

        # Will exit on next execution of quitMainLoop_()
        self.running = False

    def quitMainLoop_(self, aTimer):
        # Stop the main loop after one frame is captured. Call rapidly from timer.
        if not self.running:
            AppHelper.stopEventLoop()

    def startImageCapture(self, aTimer):
        print "Finding camera"

        # Create a QT Capture session
        self.session = QTKit.QTCaptureSession.alloc().init()

        # Find iSight device and open it. Note: PyObjC returns NSError**
        # out-parameters as part of a (result, error) tuple, so unpack it
        # rather than testing the tuple itself (a non-empty tuple is always truthy).
        dev = QTKit.QTCaptureDevice.defaultInputDeviceWithMediaType_(QTKit.QTMediaTypeVideo)
        print "Device: %s" % dev
        success, error = dev.open_(None)
        if not success:
            print "Couldn't open capture device."
            return

        # Create an input instance with the device we found and add to session
        input = QTKit.QTCaptureDeviceInput.alloc().initWithDevice_(dev)
        success, error = self.session.addInput_error_(input, None)
        if not success:
            print "Couldn't add input device."
            return

        # Create an output instance with a delegate for callbacks and add to session
        output = QTKit.QTCaptureDecompressedVideoOutput.alloc().init()
        output.setDelegate_(self)
        success, error = self.session.addOutput_error_(output, None)
        if not success:
            print "Failed to add output delegate."
            return

        # Start the capture
        print "Initiating capture..."
        self.session.startRunning()


    def main(self):
        # Callback that quits after a frame is captured
        NSTimer.scheduledTimerWithTimeInterval_target_selector_userInfo_repeats_(0.1,
                                                                                 self,
                                                                                 'quitMainLoop:',
                                                                                 None,
                                                                                 True)

        # Turn on the camera and start the capture
        self.startImageCapture(None)

        # Start Cocoa's main event loop
        AppHelper.runConsoleEventLoop(installInterrupt=True)

        print "Frame capture completed."

if __name__ == "__main__": 
    test = NSImageTest.alloc().init() 
    test.main() 
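A note on the shutdown path: the capture callback appears to run on QTKit's capture thread, so instead of stopping the event loop there it only sets self.running = False; the repeating 0.1-second NSTimer then notices the flag on the main run loop and calls AppHelper.stopEventLoop() from there. Run the script directly and it should leave grab.bmp, python.bmp, and python2.bmp in the working directory:

$ python2.7 camera.py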

Cool, man. By the way, you should mark your own answer as 'accepted' :) – 2011-02-04 23:12:26


This script works well, but it fails when writing the bytes to file. You should change open('filename', 'w') to open('filename', 'wb') to open the files in binary mode; then it works. – andli 2013-08-11 07:32:34


QTKit is deprecated, and PyObjC is a big dependency (and seems to be tricky to build if you want it in Homebrew). On top of that, PyObjC does not cover most of AVFoundation, so I created a simple camera extension for Python that uses AVFoundation to record a video or snap a picture. It needs no dependencies (the Cython intermediate files are committed so that most users can avoid needing Cython).

It should be possible to install it like this (depending on the pip version, an #egg=pyavfcam suffix may be required):

pip install -e git+https://github.com/dashesy/pyavfcam.git 

Then we can use it to take a picture:

import pyavfcam 

# Open the default video source 
cam = pyavfcam.AVFCam(sinks='image') 
frame = cam.snap_picture('test.jpg') # frame is a memory buffer that np.asarray(frame) can read

Not that it is related to this question, but if the AVFCam class is subclassed, the overridden methods will be called with the result; a sketch of that follows.
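For illustration, a sketch of what that subclassing could look like. The callback name image_output_done below is hypothetical, so check the pyavfcam README for the real overridable method names:

import pyavfcam

class MyCamera(pyavfcam.AVFCam):
    # Hypothetical hook name; consult the pyavfcam docs for actual callbacks.
    def image_output_done(self):
        print "Snapshot written to disk"

cam = MyCamera(sinks='image')
cam.snap_picture('test.jpg')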