
I am getting error -50 (invalid parameters) from AudioUnitRender in the following context. I am using this Pitch Detector sample app as my starting point, and it works fine. The only major difference in my project is that I am also using the Remote I/O unit for audio output, and the audio output works correctly. Here are my input callback and my initialization code (error checking removed for brevity). I know this is a lot of code, but error -50 gives me very little information about where the problem might be.

Input callback:

OSStatus inputCallback(void* inRefCon, 
          AudioUnitRenderActionFlags *ioActionFlags, 
          const AudioTimeStamp  *inTimeStamp, 
          UInt32      inBusNumber, 
          UInt32      inNumberFrames, 
          AudioBufferList    *ioData) { 

    WBAudio* audioObject= (WBAudio*)inRefCon; 

    AudioUnit rioUnit = audioObject->m_audioUnit; 
    OSStatus renderErr; 
    UInt32 bus1 = 1; 

    renderErr = AudioUnitRender(rioUnit, ioActionFlags, 
           inTimeStamp, bus1, inNumberFrames, audioObject->m_inBufferList); 
    if (renderErr < 0) { 
     return renderErr; // breaks here 
    } 

    return noErr; 
} // end inputCallback() 

Initialization:

- (id) init { 

    self= [super init]; 
    if(!self) return nil; 

    OSStatus result; 

    //! Initialize a buffer list for rendering input 
    size_t bytesPerSample; 
    bytesPerSample = sizeof(SInt16); 
    m_inBufferList = (AudioBufferList *)malloc(sizeof(AudioBuffer)); 
    m_inBufferList->mNumberBuffers = 1; 
    m_inBufferList->mBuffers[0].mNumberChannels = 1; 
    m_inBufferList->mBuffers[0].mDataByteSize = 512*bytesPerSample; 
    m_inBufferList->mBuffers[0].mData = calloc(512, bytesPerSample); 

    //! Initialize an audio session to get buffer size 
    result = AudioSessionInitialize(NULL, NULL, NULL, NULL); 

    UInt32 audioCategory = kAudioSessionCategory_PlayAndRecord; 
    result = AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(audioCategory), &audioCategory); 

    // Set preferred buffer size 
    Float32 preferredBufferSize = static_cast<float>(m_pBoard->m_uBufferSize)/m_pBoard->m_fSampleRate; 
    result = AudioSessionSetProperty(kAudioSessionProperty_PreferredHardwareIOBufferDuration, sizeof(preferredBufferSize), &preferredBufferSize); 

    // Get actual buffer size 
    Float32 audioBufferSize; 
    UInt32 size = sizeof (audioBufferSize); 
    result = AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareIOBufferDuration, &size, &audioBufferSize); 

    result = AudioSessionSetActive(true); 

    //! Create our Remote I/O component description 
    AudioComponentDescription desc; 
    desc.componentType= kAudioUnitType_Output; 
    desc.componentSubType= kAudioUnitSubType_RemoteIO; 
    desc.componentFlags= 0; 
    desc.componentFlagsMask= 0; 
    desc.componentManufacturer= kAudioUnitManufacturer_Apple; 

    //! Find the corresponding component 
    AudioComponent outputComponent = AudioComponentFindNext(NULL, &desc); 

    //! Create the component instance 
    result = AudioComponentInstanceNew(outputComponent, &m_audioUnit); 

    //! Enable audio output 
    UInt32 flag = 1; 
    result = AudioUnitSetProperty(m_audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, kOutputBus, &flag, sizeof(flag)); 

    //! Enable audio input 
    result= AudioUnitSetProperty(m_audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, kInputBus, &flag, sizeof(flag)); 

    //! Create our audio stream description 
    m_audioFormat.mSampleRate= m_pBoard->m_fSampleRate; 
    m_audioFormat.mFormatID= kAudioFormatLinearPCM; 
    m_audioFormat.mFormatFlags= kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked; 
    m_audioFormat.mFramesPerPacket= 1; 
    m_audioFormat.mChannelsPerFrame= 1; 
    m_audioFormat.mBitsPerChannel= 16; 
    m_audioFormat.mBytesPerPacket= 2; 
    m_audioFormat.mBytesPerFrame= 2; 

    //! Set the stream format 
    result = AudioUnitSetProperty(m_audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, kOutputBus, &m_audioFormat, sizeof(m_audioFormat)); 

    result = AudioUnitSetProperty(m_audioUnit, kAudioUnitProperty_StreamFormat, 
           kAudioUnitScope_Output, 
           kInputBus, &m_audioFormat, sizeof(m_audioFormat)); 

    //! Set the render callback 
    AURenderCallbackStruct renderCallbackStruct= {0}; 
    renderCallbackStruct.inputProc= renderCallback; 
    renderCallbackStruct.inputProcRefCon= m_pBoard; 
    result = AudioUnitSetProperty(m_audioUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Global, kOutputBus, &renderCallbackStruct, sizeof(renderCallbackStruct)); 

    //! Set the input callback 
    AURenderCallbackStruct inputCallbackStruct = {0}; 
    inputCallbackStruct.inputProc= inputCallback; 
    inputCallbackStruct.inputProcRefCon= self; 
    result= AudioUnitSetProperty(m_audioUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Input, kOutputBus, &inputCallbackStruct, sizeof(inputCallbackStruct)); 

    //! Initialize the unit 
    result = AudioUnitInitialize(m_audioUnit); 

    return self; 
} 

Check to make sure your bus1 and inBusNumber are the same. The site you mentioned also has no pitch detector code, only a mislabeled peak frequency detector, which often fails at pitch estimation. – hotpaw2 2012-04-24 02:03:43


They are the same. I will definitely take the pitch detection with a grain of salt. – Luke 2012-04-24 15:49:33


Did you ever solve this? The problem I am having is that the call to AudioUnitRender usually succeeds, but when my app comes back from the background it fails, because the input callback asks for more bytes. My first thought was that it was related to my buffer sizes, but increasing them did not help – tomk 2012-06-22 14:31:05
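
For reference, one way to guard against the input callback asking for more bytes than the preallocated 512-frame buffer holds is to size the buffer from inNumberFrames on every render. A minimal sketch, assuming the mono 16-bit buffer list from the question; ensureBufferCapacity is a hypothetical helper, called at the top of inputCallback before AudioUnitRender:

    // Hypothetical helper, not from the question: grow the single mono 16-bit
    // buffer whenever the hardware asks for more frames than it currently holds
    // (e.g. after the I/O buffer duration changes on return from the background).
    static void ensureBufferCapacity(AudioBufferList *list, UInt32 inNumberFrames)
    {
        UInt32 neededBytes = inNumberFrames * sizeof(SInt16);
        AudioBuffer *buf = &list->mBuffers[0];
        if (buf->mDataByteSize < neededBytes) {
            free(buf->mData);
            buf->mData = malloc(neededBytes);
            buf->mDataByteSize = neededBytes;
        }
    }

Called as ensureBufferCapacity(audioObject->m_inBufferList, inNumberFrames) at the start of the callback, AudioUnitRender never sees a buffer smaller than the number of frames it is asked to produce.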

Answers


You allocate m_inBufferList as:

m_inBufferList = (AudioBufferList *)malloc(sizeof(AudioBuffer)); 

This should be:

m_inBufferList = (AudioBufferList *)malloc(sizeof(AudioBufferList) + sizeof(AudioBuffer) * numberOfBuffers); //numberOfBuffers in your case is 1 

Maybe this will fix your problem.
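
If it helps, here is a minimal sketch of a general-purpose allocator (createBufferList is a hypothetical helper, not part of Core Audio) that accounts for the one AudioBuffer already declared inside AudioBufferList:

    // Hypothetical helper: allocate an AudioBufferList with numberOfBuffers entries.
    // AudioBufferList already declares one AudioBuffer inline, so only the
    // additional (numberOfBuffers - 1) entries need extra space.
    static AudioBufferList *createBufferList(UInt32 numberOfBuffers,
                                             UInt32 channelsPerBuffer,
                                             UInt32 bytesPerBuffer)
    {
        size_t size = sizeof(AudioBufferList)
                    + (numberOfBuffers - 1) * sizeof(AudioBuffer);
        AudioBufferList *list = (AudioBufferList *)malloc(size);
        list->mNumberBuffers = numberOfBuffers;
        for (UInt32 i = 0; i < numberOfBuffers; ++i) {
            list->mBuffers[i].mNumberChannels = channelsPerBuffer;
            list->mBuffers[i].mDataByteSize   = bytesPerBuffer;
            list->mBuffers[i].mData           = calloc(1, bytesPerBuffer);
        }
        return list;
    }

With the setup from the question this would be m_inBufferList = createBufferList(1, 1, 512 * sizeof(SInt16));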


You are missing a parenthesis somewhere. – 2012-04-30 11:29:41


OK, but the error still persists. – Luke 2012-05-03 15:12:37


Since he only needs one buffer, the second sizeof can be skipped, because the declaration of AudioBufferList already includes one mBuffers entry – tomk 2012-06-22 14:28:25


Error -50 means a parameter error according to the developer documentation. Make sure you are passing the correct parameters into AudioUnitRender. Check your stream format and the settings on the device.
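
As a concrete check, one might read back the format the unit is actually using on the input element's output scope, which is the format AudioUnitRender has to satisfy when it fills m_inBufferList. A minimal sketch, assuming the m_audioUnit and bus numbering from the question:

    // Read the stream format in effect on the output scope of the input element (bus 1)
    // and log it so it can be compared with what m_inBufferList was sized for.
    AudioStreamBasicDescription actualFormat;
    UInt32 size = sizeof(actualFormat);
    OSStatus err = AudioUnitGetProperty(m_audioUnit,
                                        kAudioUnitProperty_StreamFormat,
                                        kAudioUnitScope_Output,
                                        1,               // input element (bus 1)
                                        &actualFormat,
                                        &size);
    if (err == noErr) {
        NSLog(@"bus 1 output scope: %.0f Hz, %u channels, %u bits per channel",
              actualFormat.mSampleRate,
              (unsigned)actualFormat.mChannelsPerFrame,
              (unsigned)actualFormat.mBitsPerChannel);
    }

If the sample rate, channel count, or sample size reported here does not match the 16-bit mono buffer allocated in init, that mismatch is a likely source of the -50.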