2017-10-28 122 views
1

我需要以小块播放通过套接字接收的原始音频数据。我了解到应该使用循环缓冲区;我在 Objective-C 中找到了一些解决方案,但无法让其中任何一个正常工作,尤其是在 Swift 3 中。
任何人都可以帮助我吗?如何在Swift中播放来自套接字的原始音频数据

+0

您是否知道传入音频数据的格式? – dave234

+0

@Dave我只是得到字节,所以格式应该是PCM –

回答

0

首先,你像这样实现 ring buffer(环形缓冲区)。

/// A fixed-capacity FIFO ring buffer backed by an array of optionals.
///
/// Elements are appended with `write(element:)` and consumed in arrival
/// order with `read()`. The buffer rejects writes when full rather than
/// overwriting unread data.
public struct RingBuffer<T> {
    private var array: [T?]
    private var readIndex = 0
    private var writeIndex = 0

    /// Creates a buffer able to hold up to `count` elements.
    public init(count: Int) {
    array = [T?](repeating: nil, count: count)
    }

    /// Appends `element`. Returns `false` if the buffer is out of space.
    @discardableResult public mutating func write(element: T) -> Bool {
    guard !isFull else { return false }
    array[writeIndex % array.count] = element
    writeIndex += 1
    return true
    }

    /// Removes and returns the oldest element, or `nil` if the buffer is empty.
    public mutating func read() -> T? {
    guard !isEmpty else { return nil }
    let slot = readIndex % array.count
    let element = array[slot]
    // Clear the consumed slot so reference-typed elements are released
    // immediately instead of being retained until the slot is overwritten.
    array[slot] = nil
    readIndex += 1
    // Keep both indices bounded so they can never overflow Int on a
    // long-running stream. Subtracting the same amount from both
    // preserves `availableSpaceForReading` and both modulo positions.
    if readIndex >= array.count {
     readIndex -= array.count
     writeIndex -= array.count
    }
    return element
    }

    /// Number of elements currently stored and readable.
    fileprivate var availableSpaceForReading: Int {
    return writeIndex - readIndex
    }

    public var isEmpty: Bool {
    return availableSpaceForReading == 0
    }

    /// Number of free slots remaining.
    fileprivate var availableSpaceForWriting: Int {
    return array.count - availableSpaceForReading
    }

    public var isFull: Bool {
    return availableSpaceForWriting == 0
    }
}

之后,你就像这样实现音频单元。 (根据需要进行修改)

/// Owns an output audio unit (RemoteIO on iOS, DefaultOutput on macOS)
/// whose samples are pulled from the module-level `renderCallback`.
/// Call `start()` to begin playback and `stop()` to halt it.
class ToneGenerator {
    fileprivate var toneUnit: AudioUnit? = nil

    init() {
     setupAudioUnit()
    }

    deinit {
     stop()
     // Dispose the component instance so the unit is not leaked;
     // `stop()` only stops/uninitializes it.
     if let unit = toneUnit {
      AudioComponentInstanceDispose(unit)
      toneUnit = nil
     }
    }

    func setupAudioUnit() {

     // Configure the description of the output audio component we want to find:
     let componentSubtype: OSType
     #if os(OSX)
      componentSubtype = kAudioUnitSubType_DefaultOutput
     #else
      componentSubtype = kAudioUnitSubType_RemoteIO
     #endif
     var defaultOutputDescription = AudioComponentDescription(componentType: kAudioUnitType_Output,
                   componentSubType: componentSubtype,
                   componentManufacturer: kAudioUnitManufacturer_Apple,
                   componentFlags: 0,
                   componentFlagsMask: 0)
     // Avoid force-unwrapping: bail out cleanly if no matching component exists.
     guard let defaultOutput = AudioComponentFindNext(nil, &defaultOutputDescription) else {
      assertionFailure("AudioComponentFindNext found no output component")
      return
     }

     var err: OSStatus

     // Create a new instance of it in the form of our audio unit:
     err = AudioComponentInstanceNew(defaultOutput, &toneUnit)
     assert(err == noErr, "AudioComponentInstanceNew failed")
     guard let unit = toneUnit else { return }

     // Set the render callback as the input for our audio unit.
     // NOTE(review): casting a Swift function with `as?` to the C function
     // pointer type AURenderCallback can yield nil; prefer declaring
     // `renderCallback` with the exact AURenderCallback signature and
     // passing it directly — TODO confirm on the deployment toolchain.
     var renderCallbackStruct = AURenderCallbackStruct(inputProc: renderCallback as? AURenderCallback,
                  inputProcRefCon: nil)
     err = AudioUnitSetProperty(unit,
            kAudioUnitProperty_SetRenderCallback,
            kAudioUnitScope_Input,
            0,
            &renderCallbackStruct,
            UInt32(MemoryLayout<AURenderCallbackStruct>.size))
     assert(err == noErr, "AudioUnitSetProperty SetRenderCallback failed")

     // Set the stream format for the audio unit. That is, the format of the
     // data that our render callback will provide: mono, 32-bit float,
     // non-interleaved linear PCM at `sampleRate` Hz.
     var streamFormat = AudioStreamBasicDescription(mSampleRate: Float64(sampleRate),
                 mFormatID: kAudioFormatLinearPCM,
                 mFormatFlags: kAudioFormatFlagsNativeFloatPacked|kAudioFormatFlagIsNonInterleaved,
                 mBytesPerPacket: 4 /*four bytes per float*/,
      mFramesPerPacket: 1,
      mBytesPerFrame: 4,
      mChannelsPerFrame: 1,
      mBitsPerChannel: 4*8,
      mReserved: 0)
     err = AudioUnitSetProperty(unit,
            kAudioUnitProperty_StreamFormat,
            kAudioUnitScope_Input,
            0,
            &streamFormat,
            UInt32(MemoryLayout<AudioStreamBasicDescription>.size))
     assert(err == noErr, "AudioUnitSetProperty StreamFormat failed")

    }

    func start() {
     guard let unit = toneUnit else { return }
     // Check each status separately: the original overwrote the
     // AudioUnitInitialize result before ever inspecting it.
     var status = AudioUnitInitialize(unit)
     assert(status == noErr, "AudioUnitInitialize failed")
     status = AudioOutputUnitStart(unit)
     assert(status == noErr, "AudioOutputUnitStart failed")
    }

    func stop() {
     // Guard instead of force-unwrapping: `stop()` is called from `deinit`
     // and must not crash when setup never produced a unit.
     guard let unit = toneUnit else { return }
     AudioOutputUnitStop(unit)
     AudioUnitUninitialize(unit)
    }

}

这些是固定值:

// Fixed playback parameters consumed by `renderCallback`.
/// Output sample rate in Hz (matches the incoming PCM stream).
private let sampleRate = 16000 
/// Peak amplitude of the generated sine wave (1.0 = full scale Float32).
private let amplitude: Float = 1.0 
/// Test-tone frequency in Hz.
private let frequency: Float = 440 

/// Theta is changed over time as each sample is provided.
/// Current phase of the oscillator, in radians; advanced once per sample.
private var theta: Float = 0.0 


/// Render callback: fills the first buffer of `ioData` with `inNumberFrames`
/// mono Float32 samples of a sine tone at `frequency` Hz, advancing the
/// module-level `theta` phase accumulator. (To play socket audio, replace the
/// sine generation with reads from a `RingBuffer` of PCM samples.)
private func renderCallback(_ inRefCon: UnsafeMutableRawPointer,
          ioActionFlags: UnsafeMutablePointer<AudioUnitRenderActionFlags>,
          inTimeStamp: UnsafePointer<AudioTimeStamp>,
          inBusNumber: UInt32,
          inNumberFrames: UInt32,
          ioData: UnsafeMutablePointer<AudioBufferList>) -> OSStatus {
    let abl = UnsafeMutableAudioBufferListPointer(ioData)
    let buffer = abl[0]
    // CoreAudio's Swift overlay provides this initializer over the
    // buffer's mData/mDataByteSize.
    let pointer: UnsafeMutableBufferPointer<Float32> = UnsafeMutableBufferPointer(buffer)
    // Hoist the loop-invariant phase step; use Float.pi instead of the
    // deprecated C constant M_PI.
    let thetaIncrement = 2.0 * Float.pi * frequency/Float(sampleRate)
    for frame in 0..<inNumberFrames {
     let pointerIndex = pointer.startIndex.advanced(by: Int(frame))
     pointer[pointerIndex] = sin(theta) * amplitude
     theta += thetaIncrement
    }
    // Wrap the phase into [0, 2π): an unbounded Float accumulator loses
    // precision in sin() over long playback and the tone would degrade.
    theta = theta.truncatingRemainder(dividingBy: 2.0 * Float.pi)
    return noErr
}

你需要把数据放进一个循环缓冲区,然后从缓冲区读出并播放声音。

+0

p.s.这是来自udp的原始音频的代码 PCM 16000 频率440 –

+0

你如何在这里读取缓冲区并发送播放?我正试图理解你的代码 –