
I am using Matt Gallagher's AudioStreamer to play an audio stream. In another part of the program (a different controller) I am trying to record something from the device's microphone, but after a stream has been played the audio queue fails to record any audio.

Here is my setup:

void SFIdentificator::startRecord()
{
    int i, bufferByteSize;
    UInt32 size;

try { 
    numberOfPackets = 0; 

    // specify the recording format 
    SetupAudioFormat(kAudioFormatLinearPCM); 

    AudioQueueNewInput( &mRecordFormat, 
         MyInputBufferHandler, 
         this /* userData */, 
         CFRunLoopGetMain() /* run loop */, kCFRunLoopCommonModes /* run loop mode */, 
         0 /* flags */, &mQueue); 
    mRecordPacket = 0; 

    size = sizeof(mRecordFormat); 
    AudioQueueGetProperty(mQueue, kAudioQueueProperty_StreamDescription, &mRecordFormat, &size); 

    bufferByteSize = ComputeRecordBufferSize(&mRecordFormat, kBufferDurationSeconds); // enough bytes for half a second 


    size = sizeof(mRecordFormat); 
    XThrowIfError(AudioQueueGetProperty(mQueue, kAudioQueueProperty_StreamDescription, 
             &mRecordFormat, &size), "couldn't get queue's format"); 

    for (i = 0; i < kNumberRecordBuffers; ++i) { 
     XThrowIfError(AudioQueueAllocateBuffer(mQueue, bufferByteSize, &mBuffers[i]), "AudioQueueAllocateBuffer failed"); 
     XThrowIfError(AudioQueueEnqueueBuffer(mQueue, mBuffers[i], 0, NULL), "AudioQueueEnqueueBuffer failed"); 
    } 
    mIsRunning = true; 

    XThrowIfError(AudioQueueStart(mQueue, NULL), "AudioQueueStart failed"); 


} catch (CAXException e) { 
    char buf[256]; 
    fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf)); 
} catch (...) { 
    fprintf(stderr, "An unknown error occurred\n"); 
} 

}
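
The MyInputBufferHandler callback passed to AudioQueueNewInput above is not shown in the question. For context, here is a minimal sketch of such a handler based on the standard AudioQueueInputCallback signature; the body (counting packets and re-enqueueing the buffer) is an assumption, not the original implementation, and it presumes MyInputBufferHandler is declared as a static member of SFIdentificator.

// sketch of an AudioQueueInputCallback: consume the captured data, then hand
// the buffer back to the queue so recording keeps running
void SFIdentificator::MyInputBufferHandler(void *inUserData,
                                           AudioQueueRef inAQ,
                                           AudioQueueBufferRef inBuffer,
                                           const AudioTimeStamp *inStartTime,
                                           UInt32 inNumPackets,
                                           const AudioStreamPacketDescription *inPacketDesc)
{
    SFIdentificator *recorder = static_cast<SFIdentificator *>(inUserData);

    if (inNumPackets > 0) {
        // inBuffer->mAudioData / inBuffer->mAudioDataByteSize hold the captured PCM
        recorder->numberOfPackets += inNumPackets;
    }

    // keep the queue fed as long as recording is active
    if (recorder->mIsRunning)
        AudioQueueEnqueueBuffer(inAQ, inBuffer, 0, NULL);
}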

void SFIdentificator::SetupAudioFormat(UInt32 inFormatID)
{
    memset(&mRecordFormat, 0, sizeof(mRecordFormat));

UInt32 size = sizeof(mRecordFormat.mSampleRate); 
XThrowIfError(AudioSessionGetProperty( kAudioSessionProperty_CurrentHardwareSampleRate, &size, &mRecordFormat.mSampleRate), "couldn't get hardware sample rate"); 

size = sizeof(mRecordFormat.mChannelsPerFrame); 
XThrowIfError(AudioSessionGetProperty( kAudioSessionProperty_CurrentHardwareInputNumberChannels, &size, &mRecordFormat.mChannelsPerFrame), "couldn't get input channel count"); 

mRecordFormat.mFormatID = inFormatID; 
if (inFormatID == kAudioFormatLinearPCM){ 
    // if we want pcm, default to signed 16-bit little-endian 

    mRecordFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked; 
    //  mRecordFormat.mBitsPerChannel = 16; 


    mRecordFormat.mBytesPerPacket = mRecordFormat.mBytesPerFrame = (mRecordFormat.mBitsPerChannel/8) * mRecordFormat.mChannelsPerFrame; 
    mRecordFormat.mFramesPerPacket = 1; 

    // note: these hard-coded values override the hardware sample rate and
    // channel count queried from the AudioSession above
    mRecordFormat.mFormatID   = kAudioFormatLinearPCM; 
    mRecordFormat.mSampleRate  = 32000.0; 
    mRecordFormat.mChannelsPerFrame = 1; 
    mRecordFormat.mBitsPerChannel = 16; 
    mRecordFormat.mBytesPerPacket = mRecordFormat.mBytesPerFrame = mRecordFormat.mChannelsPerFrame * sizeof (SInt16); 
    mRecordFormat.mFramesPerPacket = 1; 
} 

}

UInt32 SFIdentificator::ComputeRecordBufferSize(const AudioStreamBasicDescription *format, float seconds){ 
static const int maxBufferSize = 0x50000; 

int maxPacketSize = format->mBytesPerPacket; 
if (maxPacketSize == 0) { 
    UInt32 maxVBRPacketSize = sizeof(maxPacketSize); 
    AudioQueueGetProperty (mQueue, kAudioQueueProperty_MaximumOutputPacketSize, &maxPacketSize, &maxVBRPacketSize); 
} 

Float64 numBytesForTime = DataFormat().mSampleRate * maxPacketSize * seconds; 
// *outBufferSize = (UInt32)(numBytesForTime < maxBufferSize ? numBytesForTime : maxBufferSize); 
return (UInt32)(numBytesForTime < maxBufferSize ? numBytesForTime : maxBufferSize); 

}
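
As a sanity check on the numbers: assuming DataFormat() returns the mRecordFormat set up above (32 kHz, mono, 16-bit PCM, so 2 bytes per packet) and kBufferDurationSeconds is the half second mentioned in the comment, each buffer works out to 32000 * 2 * 0.5 = 32000 bytes, well under the 0x50000 (327680 byte) cap.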

It seems that if I use the AudioStreamer class first and then try to record something afterwards, the input callback is never even called. If I do not use AudioStreamer first, everything works fine.

Can anyone point me in the right direction?

Answer

To answer my own question: the problem was the init() call that sets up the AudioSession object. It should be done only once per session; doing it again returns an error, which I had taken to be the reason the stream could not be recorded. In fact, even though the second init fails, recording still works, because the first init had of course already succeeded.
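
For reference, a minimal sketch of the "initialize only once" idea is below. It is an illustration rather than the code from the project: the EnsureAudioSessionInitialized helper name is made up, treating kAudioSessionAlreadyInitialized as harmless reflects the point above, and selecting the PlayAndRecord category is an extra assumption so that both the streamer and the recorder can share the session.

#include <AudioToolbox/AudioToolbox.h>
#include <cstdio>

// hypothetical helper: set up the AudioSession exactly once per process and
// tolerate a repeated initialization instead of treating it as fatal
static void EnsureAudioSessionInitialized()
{
    static bool sInitialized = false;
    if (sInitialized)
        return;

    OSStatus err = AudioSessionInitialize(NULL, NULL, NULL, NULL);
    if (err != noErr && err != kAudioSessionAlreadyInitialized) {
        fprintf(stderr, "AudioSessionInitialize failed: %d\n", (int)err);
        return;
    }

    // assumption: playback (AudioStreamer) and microphone input in the same app
    // need a category that allows both
    UInt32 category = kAudioSessionCategory_PlayAndRecord;
    AudioSessionSetProperty(kAudioSessionProperty_AudioCategory,
                            sizeof(category), &category);
    AudioSessionSetActive(true);

    sInitialized = true;
}

Calling something like this before starting either the streamer or the recorder means the second component no longer trips over the error returned by a repeated session initialization.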