9

Holding an MTLTexture obtained from a CVImageBuffer causes stuttering

I create MTLTextures from CVImageBuffers (coming from the camera and from players): I use CVMetalTextureCacheCreateTextureFromImage to get a CVMetalTexture, and then CVMetalTextureGetTexture to get the MTLTexture.
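
In code, that pipeline looks roughly like this (a minimal sketch, assumed to run inside the capture callback with Metal and CoreVideo imported; `textureCache` and `imageBuffer` are assumed to already exist, and BGRA is an assumed pixel format):

var cvTexture: CVMetalTexture? 
let result = CVMetalTextureCacheCreateTextureFromImage(kCFAllocatorDefault, textureCache, imageBuffer, nil, .bgra8Unorm, CVPixelBufferGetWidth(imageBuffer), CVPixelBufferGetHeight(imageBuffer), 0, &cvTexture) 
guard result == kCVReturnSuccess, let cvTexture = cvTexture, let texture = CVMetalTextureGetTexture(cvTexture) else { return } 
// From here on only `texture` is kept; `cvTexture` goes out of scope. 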

The problem I'm seeing is that when I later render the texture with Metal, I occasionally see video frames rendered out of order (visually it stutters back and forth in time), presumably because CoreVideo is modifying the underlying CVImageBuffer storage while the MTLTexture merely points into it.

Is there any way to make CoreVideo not touch that buffer, and use another one from its pool, until I release the MTLTexture object?

My current workaround is to blit the texture using an MTLBlitCommandEncoder, but since I only need to hold on to the texture for ~30 ms, that copy seems unnecessary.

+0

Do you keep a strong reference to the 'CVMetalTexture' until you're done with the Metal texture? Or do you only keep a reference to the 'MTLTexture' object? –

+0

Due to some implementation details I only hold a reference to the 'MTLTexture'. Would holding on to the 'CVMetalTexture' or 'CVImageBuffer' object solve my problem? – Blixt

+0

I don't know. It might. That's just a guess on my part. If you can try it easily, you should. :) –

Answers

0

It seems your problem depends on how you manage the capture session that obtains the raw camera data.

I think you can analyze the camera session in depth, and in real time, to know the current state of your session with this class (MetalCameraSession):

import AVFoundation 
import Metal 
public protocol MetalCameraSessionDelegate { 
    func metalCameraSession(_ session: MetalCameraSession, didReceiveFrameAsTextures: [MTLTexture], withTimestamp: Double) 
    func metalCameraSession(_ session: MetalCameraSession, didUpdateState: MetalCameraSessionState, error: MetalCameraSessionError?) 
} 
public final class MetalCameraSession: NSObject { 
    public var frameOrientation: AVCaptureVideoOrientation? { 
     didSet { 
      guard 
       let frameOrientation = frameOrientation, 
       let outputData = outputData, 
       outputData.connection(withMediaType: AVMediaTypeVideo).isVideoOrientationSupported 
      else { return } 

      outputData.connection(withMediaType: AVMediaTypeVideo).videoOrientation = frameOrientation 
     } 
    } 
    public let captureDevicePosition: AVCaptureDevicePosition 
    public var delegate: MetalCameraSessionDelegate? 
    public let pixelFormat: MetalCameraPixelFormat 
    public init(pixelFormat: MetalCameraPixelFormat = .rgb, captureDevicePosition: AVCaptureDevicePosition = .back, delegate: MetalCameraSessionDelegate? = nil) { 
     self.pixelFormat = pixelFormat 
     self.captureDevicePosition = captureDevicePosition 
     self.delegate = delegate 
     super.init() 
     NotificationCenter.default.addObserver(self, selector: #selector(captureSessionRuntimeError), name: NSNotification.Name.AVCaptureSessionRuntimeError, object: nil) 
    } 
    public func start() { 
     requestCameraAccess() 
     captureSessionQueue.async(execute: { 
      do { 
       self.captureSession.beginConfiguration() 
       try self.initializeInputDevice() 
       try self.initializeOutputData() 
       self.captureSession.commitConfiguration() 
       try self.initializeTextureCache() 
       self.captureSession.startRunning() 
       self.state = .streaming 
      } 
      catch let error as MetalCameraSessionError { 
       self.handleError(error) 
      } 
      catch { 
       print(error.localizedDescription) 
      } 
     }) 
    } 
    public func stop() { 
     captureSessionQueue.async(execute: { 
      self.captureSession.stopRunning() 
      self.state = .stopped 
     }) 
    } 
    fileprivate var state: MetalCameraSessionState = .waiting { 
     didSet { 
      guard state != .error else { return } 

      delegate?.metalCameraSession(self, didUpdateState: state, error: nil) 
     } 
    } 
    fileprivate var captureSession = AVCaptureSession() 
    internal var captureDevice = MetalCameraCaptureDevice() 
    fileprivate var captureSessionQueue = DispatchQueue(label: "MetalCameraSessionQueue", attributes: []) 
#if arch(i386) || arch(x86_64) 
#else 
    /// Texture cache we will use for converting frame images to textures 
    internal var textureCache: CVMetalTextureCache? 
#endif 
    fileprivate var metalDevice = MTLCreateSystemDefaultDevice() 
    internal var inputDevice: AVCaptureDeviceInput? { 
     didSet { 
      if let oldValue = oldValue { 
       captureSession.removeInput(oldValue) 
      } 
      // Guard against a nil assignment: only add an actual input to the session. 
      if let inputDevice = inputDevice { 
       captureSession.addInput(inputDevice) 
      } 
     } 
    } 
    internal var outputData: AVCaptureVideoDataOutput? { 
     didSet { 
      if let oldValue = oldValue { 
       captureSession.removeOutput(oldValue) 
      } 
      // Guard against a nil assignment: only add an actual output to the session. 
      if let outputData = outputData { 
       captureSession.addOutput(outputData) 
      } 
     } 
    } 
    fileprivate func requestCameraAccess() { 
     captureDevice.requestAccessForMediaType(AVMediaTypeVideo) { 
      (granted: Bool) -> Void in 
      guard granted else { 
       self.handleError(.noHardwareAccess) 
       return 
      } 

      if self.state != .streaming && self.state != .error { 
       self.state = .ready 
      } 
     } 
    } 
    fileprivate func handleError(_ error: MetalCameraSessionError) { 
     if error.isStreamingError() { 
      state = .error 
     } 

     delegate?.metalCameraSession(self, didUpdateState: state, error: error) 
    } 
    fileprivate func initializeTextureCache() throws { 
#if arch(i386) || arch(x86_64) 
     throw MetalCameraSessionError.failedToCreateTextureCache 
#else 
     guard 
      let metalDevice = metalDevice, 
      CVMetalTextureCacheCreate(kCFAllocatorDefault, nil, metalDevice, nil, &textureCache) == kCVReturnSuccess 
     else { 
      throw MetalCameraSessionError.failedToCreateTextureCache 
     } 
#endif 
    } 
    fileprivate func initializeInputDevice() throws { 
     var captureInput: AVCaptureDeviceInput! 
     guard let inputDevice = captureDevice.device(mediaType: AVMediaTypeVideo, position: captureDevicePosition) else { 
      throw MetalCameraSessionError.requestedHardwareNotFound 
     } 
     do { 
      captureInput = try AVCaptureDeviceInput(device: inputDevice) 
     } 
     catch { 
      throw MetalCameraSessionError.inputDeviceNotAvailable 
     } 
     guard captureSession.canAddInput(captureInput) else { 
      throw MetalCameraSessionError.failedToAddCaptureInputDevice 
     } 
     self.inputDevice = captureInput 
    } 
    fileprivate func initializeOutputData() throws { 
     let outputData = AVCaptureVideoDataOutput() 

     outputData.videoSettings = [ 
      kCVPixelBufferPixelFormatTypeKey as AnyHashable : Int(pixelFormat.coreVideoType) 
     ] 
     outputData.alwaysDiscardsLateVideoFrames = true 
     outputData.setSampleBufferDelegate(self, queue: captureSessionQueue) 

     guard captureSession.canAddOutput(outputData) else { 
      throw MetalCameraSessionError.failedToAddCaptureOutput 
     } 

     self.outputData = outputData 
    } 
    @objc 
    fileprivate func captureSessionRuntimeError() { 
     if state == .streaming { 
      handleError(.captureSessionRuntimeError) 
     } 
    } 
    deinit { 
     NotificationCenter.default.removeObserver(self) 
    } 
} 
extension MetalCameraSession: AVCaptureVideoDataOutputSampleBufferDelegate { 
#if arch(i386) || arch(x86_64) 
#else 
    private func texture(sampleBuffer: CMSampleBuffer?, textureCache: CVMetalTextureCache?, planeIndex: Int = 0, pixelFormat: MTLPixelFormat = .bgra8Unorm) throws -> MTLTexture { 
     guard let sampleBuffer = sampleBuffer else { 
      throw MetalCameraSessionError.missingSampleBuffer 
     } 
     guard let textureCache = textureCache else { 
      throw MetalCameraSessionError.failedToCreateTextureCache 
     } 
     guard let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { 
      throw MetalCameraSessionError.failedToGetImageBuffer 
     } 
     let isPlanar = CVPixelBufferIsPlanar(imageBuffer) 
     let width = isPlanar ? CVPixelBufferGetWidthOfPlane(imageBuffer, planeIndex) : CVPixelBufferGetWidth(imageBuffer) 
     let height = isPlanar ? CVPixelBufferGetHeightOfPlane(imageBuffer, planeIndex) : CVPixelBufferGetHeight(imageBuffer) 
     var imageTexture: CVMetalTexture? 
     let result = CVMetalTextureCacheCreateTextureFromImage(kCFAllocatorDefault, textureCache, imageBuffer, nil, pixelFormat, width, height, planeIndex, &imageTexture) 
     guard 
      let unwrappedImageTexture = imageTexture, 
      let texture = CVMetalTextureGetTexture(unwrappedImageTexture), 
      result == kCVReturnSuccess 
     else { 
      throw MetalCameraSessionError.failedToCreateTextureFromImage 
     } 
     return texture 
    } 
    private func timestamp(sampleBuffer: CMSampleBuffer?) throws -> Double { 
     guard let sampleBuffer = sampleBuffer else { 
      throw MetalCameraSessionError.missingSampleBuffer 
     } 

     let time = CMSampleBufferGetPresentationTimeStamp(sampleBuffer) 

     guard time != kCMTimeInvalid else { 
      throw MetalCameraSessionError.failedToRetrieveTimestamp 
     } 

     return Double(time.value) / Double(time.timescale) 
    } 
    @objc public func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) { 
     do { 
      var textures: [MTLTexture]! 

      switch pixelFormat { 
      case .rgb: 
       let textureRGB = try texture(sampleBuffer: sampleBuffer, textureCache: textureCache) 
       textures = [textureRGB] 
      case .yCbCr: 
       let textureY = try texture(sampleBuffer: sampleBuffer, textureCache: textureCache, planeIndex: 0, pixelFormat: .r8Unorm) 
       let textureCbCr = try texture(sampleBuffer: sampleBuffer, textureCache: textureCache, planeIndex: 1, pixelFormat: .rg8Unorm) 
       textures = [textureY, textureCbCr] 
      } 

      let timestamp = try self.timestamp(sampleBuffer: sampleBuffer) 

      delegate?.metalCameraSession(self, didReceiveFrameAsTextures: textures, withTimestamp: timestamp) 
     } 
     catch let error as MetalCameraSessionError { 
      self.handleError(error) 
     } 
     catch { 
      print(error.localizedDescription) 
     } 
    } 
#endif 
} 

With this class you can know the different session states and the errors that can occur (MetalCameraSessionTypes):

import AVFoundation 
public enum MetalCameraSessionState { 
    case ready 
    case streaming 
    case stopped 
    case waiting 
    case error 
} 
public enum MetalCameraPixelFormat { 
    case rgb 
    case yCbCr 
    var coreVideoType: OSType { 
     switch self { 
     case .rgb: 
      return kCVPixelFormatType_32BGRA 
     case .yCbCr: 
      return kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange 
     } 
    } 
} 
public enum MetalCameraSessionError: Error { 
    case noHardwareAccess 
    case failedToAddCaptureInputDevice 
    case failedToAddCaptureOutput 
    case requestedHardwareNotFound 
    case inputDeviceNotAvailable 
    case captureSessionRuntimeError 
    case failedToCreateTextureCache 
    case missingSampleBuffer 
    case failedToGetImageBuffer 
    case failedToCreateTextureFromImage 
    case failedToRetrieveTimestamp 
    public func isStreamingError() -> Bool { 
     switch self { 
     case .noHardwareAccess, .failedToAddCaptureInputDevice, .failedToAddCaptureOutput, .requestedHardwareNotFound, .inputDeviceNotAvailable, .captureSessionRuntimeError: 
      return true 
     default: 
      return false 
     } 
    } 
    public var localizedDescription: String { 
     switch self { 
     case .noHardwareAccess: 
      return "Failed to get access to the hardware for a given media type." 
     case .failedToAddCaptureInputDevice: 
      return "Failed to add a capture input device to the capture session." 
     case .failedToAddCaptureOutput: 
      return "Failed to add a capture output data channel to the capture session." 
     case .requestedHardwareNotFound: 
      return "Specified hardware is not available on this device." 
     case .inputDeviceNotAvailable: 
      return "Capture input device cannot be opened, probably because it is no longer available or because it is in use." 
     case .captureSessionRuntimeError: 
      return "AVCaptureSession runtime error." 
     case .failedToCreateTextureCache: 
      return "Failed to initialize texture cache." 
     case .missingSampleBuffer: 
      return "No sample buffer to convert the image from." 
     case .failedToGetImageBuffer: 
      return "Failed to retrieve an image buffer from camera's output sample buffer." 
     case .failedToCreateTextureFromImage: 
      return "Failed to convert the frame to a Metal texture." 
     case .failedToRetrieveTimestamp: 
      return "Failed to retrieve timestamp from the sample buffer." 
     } 
    } 
} 

Then you can use this wrapper around AVFoundation that exposes instance methods instead of class methods (MetalCameraCaptureDevice):

import AVFoundation 
internal class MetalCameraCaptureDevice { 
    internal func device(mediaType: String, position: AVCaptureDevicePosition) -> AVCaptureDevice? { 
     guard let devices = AVCaptureDevice.devices(withMediaType: mediaType) as? [AVCaptureDevice] else { return nil } 

     if let index = devices.index(where: { $0.position == position }) { 
      return devices[index] 
     } 
     return nil 
    } 
    internal func requestAccessForMediaType(_ mediaType: String!, completionHandler handler: ((Bool) -> Void)!) { 
     AVCaptureDevice.requestAccess(forMediaType: mediaType, completionHandler: handler) 
    } 
} 

Then you can have a custom view controller class that controls the camera, like this one (CameraViewController):

import UIKit 
import Metal 
internal final class CameraViewController: MTKViewController { 
    var session: MetalCameraSession? 
    override func viewDidLoad() { 
     super.viewDidLoad() 
     session = MetalCameraSession(delegate: self) 
    } 
    override func viewWillAppear(_ animated: Bool) { 
     super.viewWillAppear(animated) 
     session?.start() 
    } 
    override func viewDidDisappear(_ animated: Bool) { 
     super.viewDidDisappear(animated) 
     session?.stop() 
    } 
} 
// MARK: - MetalCameraSessionDelegate 
extension CameraViewController: MetalCameraSessionDelegate { 
    func metalCameraSession(_ session: MetalCameraSession, didReceiveFrameAsTextures textures: [MTLTexture], withTimestamp timestamp: Double) { 
     self.texture = textures[0] 
    } 
    func metalCameraSession(_ cameraSession: MetalCameraSession, didUpdateState state: MetalCameraSessionState, error: MetalCameraSessionError?) { 
     if error == .captureSessionRuntimeError { 
      print(error?.localizedDescription ?? "None") 
      cameraSession.start() 
     } 
     DispatchQueue.main.async { 
      self.title = "Metal camera: \(state)" 
     } 
     print("Session changed state to \(state) with error: \(error?.localizedDescription ?? "None").") 
    } 
} 

Finally, your class could be like this one (MTKViewController), where you have:

public func draw(in: MTKView) 

which gets you exactly the MTLTexture you expect from the camera buffer:

import UIKit 
import Metal 
#if arch(i386) || arch(x86_64) 
#else 
    import MetalKit 
#endif 
open class MTKViewController: UIViewController { 
    open var texture: MTLTexture? 
    open func willRenderTexture(_ texture: inout MTLTexture, withCommandBuffer commandBuffer: MTLCommandBuffer, device: MTLDevice) { 
    } 
    open func didRenderTexture(_ texture: MTLTexture, withCommandBuffer commandBuffer: MTLCommandBuffer, device: MTLDevice) { 
    } 
    override open func loadView() { 
     super.loadView() 
#if arch(i386) || arch(x86_64) 
     NSLog("Failed creating a default system Metal device, since Metal is not available on iOS Simulator.") 
#else 
     assert(device != nil, "Failed creating a default system Metal device. Please, make sure Metal is available on your hardware.") 
#endif 
     initializeMetalView() 
     initializeRenderPipelineState() 
    } 
    fileprivate func initializeMetalView() { 
#if arch(i386) || arch(x86_64) 
#else 
     metalView = MTKView(frame: view.bounds, device: device) 
     metalView.delegate = self 
     metalView.framebufferOnly = true 
     metalView.colorPixelFormat = .bgra8Unorm 
     metalView.contentScaleFactor = UIScreen.main.scale 
     metalView.autoresizingMask = [.flexibleWidth, .flexibleHeight] 
     view.insertSubview(metalView, at: 0) 
#endif 
    } 
#if arch(i386) || arch(x86_64) 
#else 
    internal var metalView: MTKView! 
#endif 
    internal var device = MTLCreateSystemDefaultDevice() 
    internal var renderPipelineState: MTLRenderPipelineState? 
    fileprivate let semaphore = DispatchSemaphore(value: 1) 
    fileprivate func initializeRenderPipelineState() { 
     guard 
      let device = device, 
      let library = device.newDefaultLibrary() 
     else { return } 

     let pipelineDescriptor = MTLRenderPipelineDescriptor() 
     pipelineDescriptor.sampleCount = 1 
     pipelineDescriptor.colorAttachments[0].pixelFormat = .bgra8Unorm 
     pipelineDescriptor.depthAttachmentPixelFormat = .invalid 
     pipelineDescriptor.vertexFunction = library.makeFunction(name: "mapTexture") 
     pipelineDescriptor.fragmentFunction = library.makeFunction(name: "displayTexture") 
     do { 
      try renderPipelineState = device.makeRenderPipelineState(descriptor: pipelineDescriptor) 
     } 
     catch { 
      assertionFailure("Failed creating a render state pipeline. Can't render the texture without one.") 
      return 
     } 
    } 
} 
#if arch(i386) || arch(x86_64) 
#else 
extension MTKViewController: MTKViewDelegate { 
    public func mtkView(_ view: MTKView, drawableSizeWillChange size: CGSize) { 
     NSLog("MTKView drawable size will change to \(size)") 
    } 
    public func draw(in: MTKView) { 
     _ = semaphore.wait(timeout: DispatchTime.distantFuture) 
     autoreleasepool { 
      guard 
       var texture = texture, 
       let device = device 
      else { 
       _ = semaphore.signal() 
       return 
      } 
      // Note: creating a new command queue on every draw call is expensive; ideally the queue would be created once and reused. 
      let commandBuffer = device.makeCommandQueue().makeCommandBuffer() 
      willRenderTexture(&texture, withCommandBuffer: commandBuffer, device: device) 
      render(texture: texture, withCommandBuffer: commandBuffer, device: device) 
     } 
    } 
    private func render(texture: MTLTexture, withCommandBuffer commandBuffer: MTLCommandBuffer, device: MTLDevice) { 
     guard 
      let currentRenderPassDescriptor = metalView.currentRenderPassDescriptor, 
      let currentDrawable = metalView.currentDrawable, 
      let renderPipelineState = renderPipelineState 
     else { 
      semaphore.signal() 
      return 
     } 
     let encoder = commandBuffer.makeRenderCommandEncoder(descriptor: currentRenderPassDescriptor) 
     encoder.pushDebugGroup("RenderFrame") 
     encoder.setRenderPipelineState(renderPipelineState) 
     encoder.setFragmentTexture(texture, at: 0) 
     encoder.drawPrimitives(type: .triangleStrip, vertexStart: 0, vertexCount: 4, instanceCount: 1) 
     encoder.popDebugGroup() 
     encoder.endEncoding() 
     commandBuffer.addScheduledHandler { [weak self] (buffer) in 
      guard let unwrappedSelf = self else { return } 

      unwrappedSelf.didRenderTexture(texture, withCommandBuffer: buffer, device: device) 
      unwrappedSelf.semaphore.signal() 
     } 
     commandBuffer.present(currentDrawable) 
     commandBuffer.commit() 
    } 
} 
#endif 

Now you have all the sources, but you can also find the complete GitHub project by navoshta (the author) here, with all the comments and explanations about the code, and a great tutorial about this project here, especially the second part, where you obtain the texture (you can find this code below in the MetalCameraSession class):

guard 
    let unwrappedImageTexture = imageTexture, 
    let texture = CVMetalTextureGetTexture(unwrappedImageTexture), 
    result == kCVReturnSuccess 
else { 
    throw MetalCameraSessionError.failedToCreateTextureFromImage 
} 
+0

That's a lot of source code from the repo, but it doesn't really tell me where to start looking for the error in my code, even if this source code does avoid the problem. At a glance I seem to be doing pretty much what this source code does; do you have any clues as to which part is important for avoiding the flickering problem? – Blixt

+0

I think you should focus on the capture session, to make sure your buffers aren't compromised by wrong thread timings, for example the start of the capture versus the real state of the device: the semaphore in MTKViewController that controls the flow of buffers is superb and guarantees the correct construction of the pipeline. –

+0

Have you tried this library? –

0

The problem may be occurring due to the camera input. If your footage is not at exactly the same frame rate as the intended output, the frame-rate mismatch will cause weird ghosting. Try disabling the auto-adjusting frame rate.
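
For example, on iOS you can pin the capture device to a fixed frame rate before starting the session. A hedged sketch (assuming `captureDevice` is your configured AVCaptureDevice and 30 fps is the target; both are assumptions, not taken from this answer):

import AVFoundation 

do { 
    try captureDevice.lockForConfiguration() 
    // Setting min and max frame duration to the same value disables automatic frame-rate adjustment. 
    captureDevice.activeVideoMinFrameDuration = CMTimeMake(1, 30) 
    captureDevice.activeVideoMaxFrameDuration = CMTimeMake(1, 30) 
    captureDevice.unlockForConfiguration() 
} 
catch { 
    print("Could not lock the capture device for configuration: \(error)") 
} 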

Other causes of this issue may be the following:

CRITICAL SPEEDS: there are certain speeds that are in sync with frame rates in a way that causes stuttering. The lower the frame rate, the more obvious the problem.

SUB-PIXEL INTERPOLATION: there are also cases where sub-pixel interpolation between frames causes areas of detail to flicker from frame to frame.

The solution for successful rendering is to use a suitable speed (pixels per second) for your frame rate, to add enough motion blur to hide the problem, or to reduce the amount of detail in the image.

+0

The input can't really be the problem, because if I copy the buffer in the callback everything works fine. The problem only manifests when I get an 'MTLTexture' from the buffer and try to render it later (outside the callback). I don't see any artifacts in the video data provided to me. – Blixt

4

I recently ran into this exact same problem. The issue is that the MTLTexture is only valid while its owning CVMetalTextureRef is still alive. You must keep a reference to the CVMetalTextureRef for the entire time you use the MTLTexture (all the way until the end of the current rendering cycle).
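
A minimal sketch of that idea (the FrameTexture wrapper is a hypothetical name, not from this answer): bundle the CVMetalTexture together with the MTLTexture derived from it, so that keeping the texture you render with automatically keeps its backing alive:

import Metal 
import CoreVideo 

/// Keeps the CVMetalTexture alive for as long as the MTLTexture derived from it is in use. 
final class FrameTexture { 
    let texture: MTLTexture 
    private let cvTexture: CVMetalTexture // retaining this keeps the texture's backing mapping valid 

    init?(cvTexture: CVMetalTexture) { 
        guard let texture = CVMetalTextureGetTexture(cvTexture) else { return nil } 
        self.cvTexture = cvTexture 
        self.texture = texture 
    } 
} 

When rendering, hold on to the FrameTexture itself (not just its .texture) until the GPU is done with the frame, for example by capturing it in commandBuffer.addCompletedHandler { _ in ... }.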

+0

This was the key to successfully constructing textures from CMSampleBufferRefs under Metal, thanks! – C0C0AL0C0

+0

C0C0AL0C0 I assume this is also the solution to the screen tearing in Apple's sample code MetalVideoCapture? https://stackoverflow.com/questions/38879518/screen-tearing-and-camera-capture-with-metal (I believe you had answered that question, but have since deleted your answer) – Gary

1

I ran into the same problem, but in my case keeping an extra reference to the CVMetalTexture object did not solve the issue.

As far as I can tell, it only happens when I receive a new frame from the camera before my Metal code is done processing the previous frame.

It seems that CVMetalTextureCacheCreateTextureFromImage simply creates a texture on top of the pixel buffer that the camera keeps feeding data into. Accessing it asynchronously from Metal code therefore causes problems.

I decided to create a copy of the MTLTexture instead (which is also asynchronous, but quick enough).
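
A hedged sketch of that copy approach, written in the same Swift 3 style as the code above (the helper name makeCopy(of:device:commandQueue:) is illustrative, not from this answer); it blits the camera-backed texture into a privately owned texture, so later rendering no longer reads CoreVideo's buffer:

import Metal 

func makeCopy(of source: MTLTexture, device: MTLDevice, commandQueue: MTLCommandQueue) -> MTLTexture { 
    // An empty texture with the same format and size as the camera frame. 
    let descriptor = MTLTextureDescriptor.texture2DDescriptor(pixelFormat: source.pixelFormat, width: source.width, height: source.height, mipmapped: false) 
    let copy = device.makeTexture(descriptor: descriptor) 
    let commandBuffer = commandQueue.makeCommandBuffer() 
    let blitEncoder = commandBuffer.makeBlitCommandEncoder() 
    blitEncoder.copy(from: source, sourceSlice: 0, sourceLevel: 0, 
                     sourceOrigin: MTLOrigin(x: 0, y: 0, z: 0), 
                     sourceSize: MTLSize(width: source.width, height: source.height, depth: 1), 
                     to: copy, destinationSlice: 0, destinationLevel: 0, 
                     destinationOrigin: MTLOrigin(x: 0, y: 0, z: 0)) 
    blitEncoder.endEncoding() 
    commandBuffer.commit() 
    return copy 
} 

The copy is only safe to sample once the blit has executed, so either wait on the command buffer or encode the subsequent render pass on the same queue so the work stays ordered.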

Here is the documentation's description of CVMetalTextureCacheCreateTextureFromImage():

"This function either creates a new or returns a cached CVMetalTexture texture buffer mapped to the image buffer according to the specified parameters, creating a live binding between a device-based image buffer and an MTLTexture object."