I am building a simple texture display that essentially renders BGRA-format video frames through Metal. I am following the same steps described in the Metal WWDC session, but I run into a problem when creating the render command encoder. My code uses the Metal framework on macOS:

id <MTLDevice> device = MTLCreateSystemDefaultDevice(); 
id<MTLCommandQueue> commandQueue = [device newCommandQueue]; 

id<MTLLibrary> library = [device newDefaultLibrary]; 

// Create Render Command Descriptor. 
MTLRenderPipelineDescriptor* renderPipelineDesc = [MTLRenderPipelineDescriptor new]; 
renderPipelineDesc.colorAttachments[0].pixelFormat = MTLPixelFormatBGRA8Unorm; 
renderPipelineDesc.vertexFunction = [library newFunctionWithName:@"basic_vertex"]; 
renderPipelineDesc.fragmentFunction = [library newFunctionWithName:@"basic_fragment"]; 

NSError* error = nil; 
id<MTLRenderPipelineState> renderPipelineState = [device newRenderPipelineStateWithDescriptor:renderPipelineDesc 
                   error:&error]; 

id<MTLCommandBuffer> commandBuffer = [commandQueue commandBuffer]; 

MTLRenderPassDescriptor* renderPassDesc = [MTLRenderPassDescriptor renderPassDescriptor]; 

id<CAMetalDrawable> drawable = [_metalLayer nextDrawable]; 

MTLRenderPassColorAttachmentDescriptor* colorAttachmentDesc = [MTLRenderPassColorAttachmentDescriptor new]; 
colorAttachmentDesc.texture = drawable.texture; 
colorAttachmentDesc.loadAction = MTLLoadActionLoad; 
colorAttachmentDesc.storeAction = MTLStoreActionStore; 
colorAttachmentDesc.clearColor = MTLClearColorMake(0, 0, 0, 1); 

[renderPassDesc.colorAttachments setObject:colorAttachmentDesc atIndexedSubscript:0]; 

[inTexture replaceRegion:region 
     mipmapLevel:0 
      withBytes:imageBytes 
     bytesPerRow:CVPixelBufferGetBytesPerRow(_image)]; 

id<MTLRenderCommandEncoder> renderCmdEncoder = [commandBuffer renderCommandEncoderWithDescriptor:renderPassDesc]; 

[renderCmdEncoder setRenderPipelineState:_renderPipelineState]; 
[renderCmdEncoder endEncoding]; 

This code crashes at the line `id<MTLRenderCommandEncoder> renderCmdEncoder = [commandBuffer renderCommandEncoderWithDescriptor:renderPassDesc];` with the message "No render targets found". I cannot figure out where and how to set the render target.

Answers

This will work perfectly; if you need help implementing it, let me know:

@import UIKit; 
@import AVFoundation; 
@import CoreMedia; 
#import <MetalKit/MetalKit.h> 
#import <Metal/Metal.h> 
#import <MetalPerformanceShaders/MetalPerformanceShaders.h> 

@interface ViewController : UIViewController <MTKViewDelegate, AVCaptureVideoDataOutputSampleBufferDelegate> {
    NSString *_displayName;
    NSString *serviceType;
}

@property (retain, nonatomic) AVCaptureSession *avSession;

@end

#import "ViewController.h" 

@interface ViewController() { 
    MTKView *_metalView; 

    id<MTLDevice> _device; 
    id<MTLCommandQueue> _commandQueue; 
    id<MTLTexture> _texture; 

    CVMetalTextureCacheRef _textureCache; 
} 

@property (strong, nonatomic) AVCaptureDevice *videoDevice; 
@property (nonatomic) dispatch_queue_t sessionQueue; 

@end 

@implementation ViewController 

- (void)viewDidLoad { 
    NSLog(@"%s", __PRETTY_FUNCTION__); 
    [super viewDidLoad]; 

    _device = MTLCreateSystemDefaultDevice();
    _commandQueue = [_device newCommandQueue];
    _metalView = [[MTKView alloc] initWithFrame:self.view.bounds]; 
    [_metalView setContentMode:UIViewContentModeScaleAspectFit]; 
    _metalView.device = _device; 
    _metalView.delegate = self; 
    _metalView.clearColor = MTLClearColorMake(1, 1, 1, 1); 
    _metalView.colorPixelFormat = MTLPixelFormatBGRA8Unorm; 
    _metalView.framebufferOnly = NO; 
    _metalView.autoResizeDrawable = NO; 

    CVMetalTextureCacheCreate(NULL, NULL, _device, NULL, &_textureCache); 

    [self.view addSubview:_metalView]; 

    self.sessionQueue = dispatch_queue_create("session queue", DISPATCH_QUEUE_SERIAL); 

    if ([self setupCamera]) { 
     [_avSession startRunning]; 
    } 
} 

- (BOOL)setupCamera { 
    NSLog(@"%s", __PRETTY_FUNCTION__); 
    @try { 
     NSError * error; 

      _avSession = [[AVCaptureSession alloc] init]; 
      [_avSession beginConfiguration]; 
      [_avSession setSessionPreset:AVCaptureSessionPreset640x480]; 

      // get list of devices; connect to front-facing camera 
      self.videoDevice = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo]; 
      if (self.videoDevice == nil) return FALSE; 

      AVCaptureDeviceInput *input = [AVCaptureDeviceInput deviceInputWithDevice:self.videoDevice error:&error]; 
      [_avSession addInput:input]; 

      dispatch_queue_t sampleBufferQueue = dispatch_queue_create("CameraMulticaster", DISPATCH_QUEUE_SERIAL); 

      AVCaptureVideoDataOutput * dataOutput = [[AVCaptureVideoDataOutput alloc] init]; 
      [dataOutput setAlwaysDiscardsLateVideoFrames:YES]; 
      [dataOutput setVideoSettings:@{(id)kCVPixelBufferPixelFormatTypeKey: @(kCVPixelFormatType_32BGRA)}]; 
      [dataOutput setSampleBufferDelegate:self queue:sampleBufferQueue]; 

      [_avSession addOutput:dataOutput]; 
      [_avSession commitConfiguration]; 
    } @catch (NSException *exception) {
     NSLog(@"%s - %@", __PRETTY_FUNCTION__, exception.description);
     return FALSE;
    }

    return TRUE;
}

- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection 
{ 
    CVPixelBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer); 
    { 
     size_t width = CVPixelBufferGetWidth(pixelBuffer); 
     size_t height = CVPixelBufferGetHeight(pixelBuffer); 

     CVMetalTextureRef texture = NULL; 
     CVReturn status = CVMetalTextureCacheCreateTextureFromImage(kCFAllocatorDefault, _textureCache, pixelBuffer, NULL, MTLPixelFormatBGRA8Unorm, width, height, 0, &texture); 
     if(status == kCVReturnSuccess) 
     { 
      _metalView.drawableSize = CGSizeMake(width, height); 
      // Keep a reference to the Metal texture that wraps the camera pixel buffer.
      _texture = CVMetalTextureGetTexture(texture);
      CFRelease(texture);
     } 
    } 
} 

- (void)drawInMTKView:(MTKView *)view { 
    // creating command encoder 
    if (_texture) { 
     id<MTLCommandBuffer> commandBuffer = [_commandQueue commandBuffer]; 
     id<MTLTexture> drawingTexture = view.currentDrawable.texture; 

     // set up and encode the filter 
     MPSImageGaussianBlur *filter = [[MPSImageGaussianBlur alloc] initWithDevice:_device sigma:5]; 

     [filter encodeToCommandBuffer:commandBuffer sourceTexture:_texture destinationTexture:drawingTexture]; 

     // committing the drawing 
     [commandBuffer presentDrawable:view.currentDrawable]; 
     [commandBuffer commit]; 
     _texture = nil; 
    } 
} 

- (void)mtkView:(MTKView *)view drawableSizeWillChange:(CGSize)size { 

} 

@end 

You should try the following:

1. Instead of creating a new render pass descriptor, use the current render pass descriptor from the MTKView object. That render pass descriptor is already configured; you don't need to set anything. Try the following:

if let currentPassDesc = view.currentRenderPassDescriptor,
   let currentDrawable = view.currentDrawable,
   let renderCommandEncoder = commandBuffer.makeRenderCommandEncoder(descriptor: currentPassDesc)
{
    renderCommandEncoder.setRenderPipelineState(renderPipeline)

    // set vertex buffers and call draw APIs
    // ...

    renderCommandEncoder.endEncoding()
    commandBuffer.present(currentDrawable)
}

2. The sample code you gave creates a new render pass descriptor and then sets its color attachment to the drawable's texture. Instead of doing that, you should create a new texture object and set that texture as the render target. You will then get the rendered content in your new texture, but it will not appear on screen; to display it, you have to copy the texture's contents into the drawable's texture and then present the drawable (see the sketch after the code below).

Below is the code for preparing the render target:

renderPassDescriptor.colorAttachments[0].clearColor = MTLClearColor(red: 0.0, green: 0.0, blue: 0.0, alpha: 1.0)
renderPassDescriptor.colorAttachments[0].loadAction = .clear
renderPassDescriptor.colorAttachments[0].storeAction = .store

renderPassDescriptor.depthAttachment.clearDepth = 1.0
renderPassDescriptor.depthAttachment.loadAction = .clear
renderPassDescriptor.depthAttachment.storeAction = .dontCare

let view = self.view as! MTKView
let textDesc = MTLTextureDescriptor.texture2DDescriptor(pixelFormat: .bgra8Unorm,
                                                        width: Int(view.frame.width),
                                                        height: Int(view.frame.height),
                                                        mipmapped: false)
textDesc.depth = 1
// The texture must be usable both as a render target and for reading in shaders.
textDesc.usage = [MTLTextureUsage.renderTarget, MTLTextureUsage.shaderRead]
textDesc.storageMode = .private
mainPassFrameBuffer = device.makeTexture(descriptor: textDesc)
renderPassDescriptor.colorAttachments[0].texture = mainPassFrameBuffer
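
For the copy-and-present step described in point 2, a minimal sketch using a blit encoder might look like the following, assuming `commandBuffer`, `view` (the MTKView) and `mainPassFrameBuffer` are the objects prepared above, and that the offscreen texture matches the drawable's size and pixel format:

// Minimal sketch: copy the offscreen render target into the drawable and present it.
// Assumes `commandBuffer`, `view` and `mainPassFrameBuffer` from the code above.
if let drawable = view.currentDrawable,
   let blitEncoder = commandBuffer.makeBlitCommandEncoder() {
    blitEncoder.copy(from: mainPassFrameBuffer,
                     sourceSlice: 0,
                     sourceLevel: 0,
                     sourceOrigin: MTLOrigin(x: 0, y: 0, z: 0),
                     sourceSize: MTLSize(width: mainPassFrameBuffer.width,
                                         height: mainPassFrameBuffer.height,
                                         depth: 1),
                     to: drawable.texture,
                     destinationSlice: 0,
                     destinationLevel: 0,
                     destinationOrigin: MTLOrigin(x: 0, y: 0, z: 0))
    blitEncoder.endEncoding()

    commandBuffer.present(drawable)
    commandBuffer.commit()
}

A blit copy requires the two textures to have the same pixel format and the copied region to fit within both, which is why the offscreen texture above is created with the drawable's BGRA8Unorm format and the view's dimensions.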