2017-05-30 241 views
2

我将示例缓冲区转换为CGContext。然后我对上下文应用一个变换,并从中创建一个CIImage,然后将其显示在UIImageView中。以任意角度旋转CMSampleBuffer并将其附加到AVAssetWriterInput中(Swift 3)

与此同时,我想将此附加到AVAssetWriterInput以创建这些转换的电影。

到目前为止,我应用于上下文的变换没有任何效果。当我在图像视图中显示这个理应已变换的图像时,它看起来和原来完全一样。

UPDATE: 我设法将采样缓冲区记录到一个视频文件(它仍然拉伸,因为方向错误)。我用这个代码为基础

http://geek-is-stupid.github.io/blog/2017/04/13/how-to-record-detect-face-overlay-video-at-real-time-using-swift/

但我仍然在对CGContext应用旋转这一步上挣扎。基本上,我对上下文所做的所有变换都被完全忽略了。

/// AVCaptureVideoDataOutput delegate callback: copies each incoming frame into a
/// fresh BGRA pixel buffer, wraps it in a CGContext, and appends it to the
/// asset-writer's pixel-buffer adaptor while also previewing it in `imageView`.
///
/// Fixes over the original:
/// 1. The early `guard` after `CVPixelBufferCreate` returned while `pixelBuffer`
///    was still locked — a lock leak on every failed frame. Locks are now paired
///    with `defer`.
/// 2. The original locked `renderedOutputPixelBuffer` twice but unlocked it once,
///    leaving the lock count unbalanced. There is now exactly one lock/unlock pair.
/// 3. Force-unwraps of the buffers are replaced with `guard let`.
func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {

     let writable = canWrite()
     // Start the writer session on the first writable frame so timestamps line up.
     if writable, sessionAtSourceTime == nil {
       print("starting session")
       sessionAtSourceTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
       assetWriter!.startSession(atSourceTime: sessionAtSourceTime!)
     }

     guard writable, let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }

     autoreleasepool {
       CVPixelBufferLockBaseAddress(pixelBuffer, CVPixelBufferLockFlags(rawValue: 0))
       // Paired unlock runs on EVERY exit path, including the guards below.
       defer { CVPixelBufferUnlockBaseAddress(pixelBuffer, CVPixelBufferLockFlags(rawValue: 0)) }

       var renderedOutputPixelBuffer: CVPixelBuffer? = nil
       let options = [
         kCVPixelBufferCGImageCompatibilityKey as String: true,
         kCVPixelBufferCGBitmapContextCompatibilityKey as String: true] as CFDictionary
       let status = CVPixelBufferCreate(kCFAllocatorDefault,
               CVPixelBufferGetWidth(pixelBuffer),
               CVPixelBufferGetHeight(pixelBuffer),
               kCVPixelFormatType_32BGRA, options,
               &renderedOutputPixelBuffer)
       guard status == kCVReturnSuccess, let outputBuffer = renderedOutputPixelBuffer else { return }

       CVPixelBufferLockBaseAddress(outputBuffer, CVPixelBufferLockFlags(rawValue: 0))
       defer { CVPixelBufferUnlockBaseAddress(outputBuffer, CVPixelBufferLockFlags(rawValue: 0)) }

       let outputBaseAddress = CVPixelBufferGetBaseAddress(outputBuffer)

       // Copy the source frame into the new buffer.
       // NOTE(review): this assumes both buffers have the same bytesPerRow; a
       // mismatch (row padding differs) would corrupt the copy — verify on device.
       memcpy(outputBaseAddress,
              CVPixelBufferGetBaseAddress(pixelBuffer),
              CVPixelBufferGetHeight(pixelBuffer) * CVPixelBufferGetBytesPerRow(pixelBuffer))

       guard let context = CGContext(data: outputBaseAddress,
             width: CVPixelBufferGetWidth(outputBuffer),
             height: CVPixelBufferGetHeight(outputBuffer),
             bitsPerComponent: 8,
             bytesPerRow: CVPixelBufferGetBytesPerRow(outputBuffer),
             space: CGColorSpaceCreateDeviceRGB(),
             bitmapInfo: bitmapInfo!) else { return }

       // Derive the rotation angle from boxView's current affine transform.
       let radians: Float = atan2f(Float(boxView!.transform.b), Float(boxView!.transform.a))
       // NOTE(review): translate/rotate only change the CTM for SUBSEQUENT drawing
       // calls; since nothing is drawn afterwards, makeImage() still returns the
       // untouched bitmap. To actually rotate the pixels, draw the source image
       // into the transformed context (or use a CIFilter, as in the accepted answer).
       context.translateBy(x: self.view.frame.size.width/2, y: self.view.frame.size.height/2)
       context.rotate(by: CGFloat(radians))

       if let image = context.makeImage() {
         self.imageView!.image = UIImage(cgImage: image)
       }

       if (bufferAdaptor?.assetWriterInput.isReadyForMoreMediaData)!, canWrite() {
         bufferAdaptor?.append(outputBuffer,
             withPresentationTime: CMSampleBufferGetPresentationTimeStamp(sampleBuffer))
       }
     }
    }

回答

1

找到了解决方案。下面是代码中的重要部分。

//create pixelbuffer from the delegate method samplebuffer
    let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)!
    CVPixelBufferLockBaseAddress(pixelBuffer, CVPixelBufferLockFlags(rawValue: 0))
    //paired unlock on every exit path (the original unlocked only at the very end)
    defer { CVPixelBufferUnlockBaseAddress(pixelBuffer, CVPixelBufferLockFlags(rawValue: 0)) }
    //create CI image from the buffer
    let ci = CIImage.init(cvPixelBuffer: pixelBuffer, options: options)
    //create filter to rotate
    let filter = CIFilter.init(name: "CIAffineTransform")
    //create transform, move rotation point to center
    var transform = CGAffineTransform(translationX: self.view.frame.midX, y: self.view.frame.midY)
    //rotate it
    //BUG FIX: CGAffineTransform has no `rotate(angle:)` method in Swift 3;
    //the correct (non-mutating) API is `rotated(by:)`.
    transform = transform.rotated(by: CGFloat(radians))
    // move the transform point back to the original
    transform = transform.translatedBy(x: -self.view.frame.midX, y: -self.view.frame.midY)

    filter!.setValue(transform, forKey: kCIInputTransformKey)
    filter!.setValue(ci, forKey: kCIInputImageKey)
    //take the output from the filter
    let output = filter?.outputImage
    //create empty pixelbuffer
    var newPixelBuffer: CVPixelBuffer? = nil

    //BUG FIX: check the CVReturn status before force-unwrapping the buffer;
    //on failure `newPixelBuffer` stays nil and the original crashed below.
    let status = CVPixelBufferCreate(kCFAllocatorDefault, Int(self.view.frame.width),
            Int(self.view.frame.height),
            kCVPixelFormatType_32BGRA,
            nil,
            &newPixelBuffer)
    guard status == kCVReturnSuccess, let destinationBuffer = newPixelBuffer, let rotated = output else { return }

    //render the filter output into the new pixelbuffer; `context` is a global
    //CIContext variable — creating a new one each frame is too CPU intensive
    context.render(rotated, to: destinationBuffer)

    //finally, write this to the pixelbufferadaptor
    if (bufferAdaptor?.assetWriterInput.isReadyForMoreMediaData)!, canWrite() {
     bufferAdaptor?.append(destinationBuffer,
         withPresentationTime: CMSampleBufferGetPresentationTimeStamp(sampleBuffer))
    }