我需要以小块的方式播放通过套接字接收到的原始音频数据。我读到应该使用循环缓冲区(ring buffer),也找到了一些 Objective-C 的解决方案,但无法让其中任何一个正常工作,尤其是在 Swift 3 中。
任何人都可以帮助我吗?如何在Swift中播放来自套接字的原始音频数据
1
A
回答
0
首先,你可以像这样实现一个环形缓冲区(ring buffer):
/// A fixed-capacity FIFO ring buffer.
///
/// `write` and `read` are O(1). The buffer rejects writes when full and
/// returns `nil` on reads when empty, so it never overwrites unread data.
/// NOTE(review): this type is not thread-safe on its own; sharing it between
/// a network thread and the audio render thread requires external
/// synchronization.
public struct RingBuffer<T> {
    private var array: [T?]
    private var readIndex = 0
    private var writeIndex = 0

    /// Creates a buffer able to hold at most `count` elements.
    public init(count: Int) {
        array = [T?](repeating: nil, count: count)
    }

    /// Appends `element` to the buffer.
    /// - Returns: `false` (leaving the buffer unchanged) if out of space.
    @discardableResult public mutating func write(element: T) -> Bool {
        guard !isFull else { return false }
        array[writeIndex % array.count] = element
        writeIndex += 1
        return true
    }

    /// Removes and returns the oldest element, or `nil` if the buffer is empty.
    public mutating func read() -> T? {
        guard !isEmpty else { return nil }
        let slot = readIndex % array.count
        let element = array[slot]
        // Clear the slot so reference-typed elements are released promptly
        // instead of being retained until the slot happens to be overwritten.
        array[slot] = nil
        readIndex += 1
        normalizeIndices()
        return element
    }

    /// Rebases both indices once `readIndex` passes the capacity. This keeps
    /// the indices bounded — the original implementation let them grow
    /// forever, which would eventually trap on `Int` overflow in a
    /// long-running audio stream — while preserving their difference
    /// (the unread count) and each index modulo `array.count`.
    private mutating func normalizeIndices() {
        if readIndex >= array.count {
            readIndex -= array.count
            writeIndex -= array.count
        }
    }

    /// Number of unread elements currently in the buffer.
    fileprivate var availableSpaceForReading: Int {
        return writeIndex - readIndex
    }

    /// `true` when there is nothing to read.
    public var isEmpty: Bool {
        return availableSpaceForReading == 0
    }

    /// Number of free slots remaining.
    fileprivate var availableSpaceForWriting: Int {
        return array.count - availableSpaceForReading
    }

    /// `true` when a write would fail.
    public var isFull: Bool {
        return availableSpaceForWriting == 0
    }
}
之后,像这样实现音频单元(根据需要进行修改):
/// Plays generated audio through the system output using a Core Audio
/// output AudioUnit (RemoteIO on iOS, DefaultOutput on macOS).
/// NOTE(review): despite the name, this class only wires up the unit; the
/// actual samples are produced by the free function `renderCallback`.
class ToneGenerator {
// The output audio unit; created in `setupAudioUnit()`, released implicitly.
fileprivate var toneUnit: AudioUnit? = nil
init() {
setupAudioUnit()
}
deinit {
stop()
}
/// Finds the platform output component, instantiates the audio unit, installs
/// the render callback, and sets the stream format (mono 32-bit float PCM).
func setupAudioUnit() {
// Configure the description of the output audio component we want to find:
let componentSubtype: OSType
#if os(OSX)
componentSubtype = kAudioUnitSubType_DefaultOutput
#else
componentSubtype = kAudioUnitSubType_RemoteIO
#endif
var defaultOutputDescription = AudioComponentDescription(componentType: kAudioUnitType_Output,
componentSubType: componentSubtype,
componentManufacturer: kAudioUnitManufacturer_Apple,
componentFlags: 0,
componentFlagsMask: 0)
// NOTE(review): force-unwrapped below; `nil` only if no matching output
// component exists on the device.
let defaultOutput = AudioComponentFindNext(nil, &defaultOutputDescription)
var err: OSStatus
// Create a new instance of it in the form of our audio unit:
err = AudioComponentInstanceNew(defaultOutput!, &toneUnit)
assert(err == noErr, "AudioComponentInstanceNew failed")
// Set the render callback as the input for our audio unit:
// NOTE(review): `renderCallback as? AURenderCallback` is suspect — a Swift
// function reference does not conditionally cast to a C function pointer
// type, so `inputProc` may silently be nil. Verify the callback actually
// fires; passing the function directly (implicit @convention(c) conversion)
// is the reliable form.
var renderCallbackStruct = AURenderCallbackStruct(inputProc: renderCallback as? AURenderCallback,
inputProcRefCon: nil)
err = AudioUnitSetProperty(toneUnit!,
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Input,
0,
&renderCallbackStruct,
UInt32(MemoryLayout<AURenderCallbackStruct>.size))
assert(err == noErr, "AudioUnitSetProperty SetRenderCallback failed")
// Set the stream format for the audio unit. That is, the format of the data that our render callback will provide.
// Mono, non-interleaved, native-endian Float32: 4 bytes per frame/packet,
// 32 bits (4*8) per channel — consistent with the Float32 writes in
// `renderCallback`.
var streamFormat = AudioStreamBasicDescription(mSampleRate: Float64(sampleRate),
mFormatID: kAudioFormatLinearPCM,
mFormatFlags: kAudioFormatFlagsNativeFloatPacked|kAudioFormatFlagIsNonInterleaved,
mBytesPerPacket: 4 /*four bytes per float*/,
mFramesPerPacket: 1,
mBytesPerFrame: 4,
mChannelsPerFrame: 1,
mBitsPerChannel: 4*8,
mReserved: 0)
err = AudioUnitSetProperty(toneUnit!,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
0,
&streamFormat,
UInt32(MemoryLayout<AudioStreamBasicDescription>.size))
assert(err == noErr, "AudioUnitSetProperty StreamFormat failed")
}
/// Initializes and starts the audio unit.
/// NOTE(review): the `status` from `AudioUnitInitialize` is overwritten by
/// `AudioOutputUnitStart` before being checked — only the second result is
/// asserted, so an initialization failure would go unnoticed here.
func start() {
var status: OSStatus
status = AudioUnitInitialize(toneUnit!)
status = AudioOutputUnitStart(toneUnit!)
assert(status == noErr)
}
/// Stops and uninitializes the audio unit. Return codes are ignored
/// (best-effort teardown, also invoked from `deinit`).
func stop() {
AudioOutputUnitStop(toneUnit!)
AudioUnitUninitialize(toneUnit!)
}
}
这是固定值
/// Output sample rate in Hz; must match the `mSampleRate` configured in
/// `ToneGenerator.setupAudioUnit()`.
private let sampleRate = 16000
/// Peak amplitude of the generated sine wave (1.0 = full scale Float32).
private let amplitude: Float = 1.0
/// Tone frequency in Hz.
private let frequency: Float = 440
/// Theta is changed over time as each sample is provided.
private var theta: Float = 0.0

/// Render callback invoked by the output AudioUnit each time it needs more
/// samples. Fills the first (mono) buffer of `ioData` with `inNumberFrames`
/// Float32 sine-wave samples and advances the phase accumulator `theta`.
/// - Returns: `noErr` unconditionally; generation itself cannot fail.
private func renderCallback(_ inRefCon: UnsafeMutableRawPointer,
                            ioActionFlags: UnsafeMutablePointer<AudioUnitRenderActionFlags>,
                            inTimeStamp: UnsafePointer<AudioTimeStamp>,
                            inBusNumber: UInt32,
                            inNumberFrames: UInt32,
                            ioData: UnsafeMutablePointer<AudioBufferList>) -> OSStatus {
    let abl = UnsafeMutableAudioBufferListPointer(ioData)
    let buffer = abl[0]
    let pointer: UnsafeMutableBufferPointer<Float32> = UnsafeMutableBufferPointer(buffer)
    // Phase advance per sample, hoisted out of the loop (it is loop-invariant).
    // `Float.pi` replaces the deprecated C constant `M_PI`, which also forced
    // a Double-to-Float conversion on every iteration in the original.
    let thetaIncrement = 2.0 * Float.pi * frequency / Float(sampleRate)
    for frame in 0..<inNumberFrames {
        let pointerIndex = pointer.startIndex.advanced(by: Int(frame))
        pointer[pointerIndex] = sin(theta) * amplitude
        theta += thetaIncrement
    }
    // Keep theta within [0, 2π) so Float precision does not degrade as the
    // phase accumulates over a long playback session. `sin` is 2π-periodic,
    // so this does not change the generated waveform.
    theta = theta.truncatingRemainder(dividingBy: 2.0 * Float.pi)
    return noErr
}
你需要把数据放入一个循环缓冲区,然后从中读取并播放声音。
+0
p.s.这是来自udp的原始音频的代码 PCM 16000 频率440 –
+0
你如何在这里读取缓冲区并发送播放?我正试图理解你的代码 –
相关问题
- 1. IO直接播放来自UDP流(NSData)的原始音频
- 2. 原始音频播放5
- 3. iPhone,以字节形式播放原始数据作为音频
- 4. 将音频播放到套接字node.js
- 5. 音频标记中的原始缓冲区数据播放
- 6. 无法播放原始音频文件
- 7. Android从C++端播放原始音频
- 8. 用QPython audiostream原始PCM音频播放
- 9. 如何在播放时处理ipod库音频文件原始数据
- 10. 如何播放基本原始的端口音频
- 11. 原始声音播放
- 12. iPhone中的原生音频播放器
- 13. 使用音频队列服务通过套接字连接播放PCM数据
- 14. 播放字符串数据。 (播放字符串值为音频)
- 15. 在Win8中播放原始PCM波形音频(用户预览)
- 16. 如何播放音频先完成音频文件,然后开始播放?
- 17. 如何将通过套接字接收的数据放入音频队列并播放它
- 18. 在swift中自动播放声音
- 19. 跳转来播放音频
- 20. 如何制作音频自动播放
- 21. 如何自动播放音频文件?
- 22. 如何消除来自使用iOS中的音频单元的套接字的音频中的噪音?
- 23. 如何在ios swift中播放视频
- 24. 在C++/Ubuntu中录制来自音频输入的原始音频
- 25. angularjs播放来自Firebase链接的音频
- 26. 如何向AudioQueue提供音频缓冲区来播放音频?
- 27. 如何排列一组来自bash的原始音频文件
- 28. 如何在elm中播放音频
- 29. 如何在Cocos2D中播放音频javascript
- 30. 开始播放音频流,在Symbian
您是否知道传入音频数据的格式? – dave234
@Dave我只是得到字节,所以格式应该是PCM –