
AUGraph sine wave corrupted

I have the following AUGraph code, which connects a sine-wave generator to a multichannel mixer and then to the default output device. It mostly works, but when it plays, the generated tone sounds corrupted... it is almost right, but something is wrong.

Any ideas?

import Cocoa 
import CoreAudio 
import AudioToolbox 
import AudioUnit 
import AVFoundation 


let sampleRate:Float64 = 41000.0 

class ViewController: NSViewController { 


    var t = 0 
    let callback2:AURenderCallback = { 
     (inRefCon: UnsafeMutablePointer<Void>, ioActionFlags: UnsafeMutablePointer<AudioUnitRenderActionFlags>, inTimeStamp: UnsafePointer<AudioTimeStamp>, inBusNumber: UInt32, inNumberFrames: UInt32, ioData: UnsafeMutablePointer<AudioBufferList>) 
     in 

     let delta:Float = Float(880 * 2 * M_PI/sampleRate) 
     let abl = UnsafeMutableAudioBufferListPointer(ioData) 
     var x:Float = 0 
     for buffer:AudioBuffer in abl { 
      //x = self._x 

      memset(buffer.mData, 0, Int(buffer.mDataByteSize)) 

      let s = sizeof(Float) 
      let r = sizeof(Float32) 
      let f = abl.count 
      let buf:UnsafeMutablePointer<Float> = unsafeBitCast(buffer.mData, UnsafeMutablePointer<Float>.self) 
      for var i:Int = 0; i < Int(inNumberFrames); i++ 
      { 
       buf[i] = sin(x) 
       x += delta 
      } 

      memcpy(buffer.mData, buf, Int(buffer.mDataByteSize)); 

      let x = 0 
     } 

     return noErr 
    } 


    struct MyAUGraphPlayer 
    { 
     var streamFormat:AudioStreamBasicDescription! 

     var graph:AUGraph = AUGraph() 

     var outputNode:AUNode! 
     var mixerNode:AUNode! 

     var outputUnit:AudioUnit! 
     var mixerUnit:AudioUnit! 

     var firstOutputSampleTime:Float64 = 0.0 


     init() 
     { 

     } 
    } 

    func addAUNode(graph:AUGraph, inout desc:AudioComponentDescription) -> AUNode 
    { 
     var outputNode:AUNode = AUNode() 
     let x = AUGraphAddNode(graph, &desc, &outputNode) 
     print("x: \(x)") 
     return outputNode 
    } 



    override func viewDidLoad() { 
     super.viewDidLoad() 

     doit() 
    } 



    func doit() 
    { 

     var desc:AudioStreamBasicDescription = AudioStreamBasicDescription() 
     desc.mSampleRate  = sampleRate 
     desc.mFormatID   = kAudioFormatLinearPCM 
     desc.mFormatFlags  = kAudioFormatFlagsNativeFloatPacked 
     desc.mFramesPerPacket = 1 
     desc.mChannelsPerFrame = 2 
     desc.mBitsPerChannel = UInt32(sizeof(Float32) * 8) 
     desc.mBytesPerFrame = desc.mChannelsPerFrame * (desc.mBitsPerChannel/8) 
     desc.mBytesPerPacket = desc.mBytesPerFrame * desc.mFramesPerPacket 



     var graph:AUGraph = AUGraph() 
     var outputNode:AUNode = AUNode() 
     var mixerNode:AUNode = AUNode() 
     var outputUnit:AudioUnit = AudioUnit() 
     var mixerUnit:AudioUnit = AudioUnit() 

     let error = NewAUGraph(&graph) 
     print("error: \(error)") 

     // Output 
     var outputDesc:AudioComponentDescription = AudioComponentDescription(componentType: OSType(kAudioUnitType_Output), 
      componentSubType: OSType(kAudioUnitSubType_DefaultOutput), 
      componentManufacturer: OSType(kAudioUnitManufacturer_Apple), 
      componentFlags: 0, 
      componentFlagsMask: 0) 

     let a1 = AUGraphAddNode(graph, &outputDesc, &outputNode) 
     print("a1: \(a1)") 


     // Mixer 
     var mixerDesc:AudioComponentDescription = AudioComponentDescription(componentType: OSType(kAudioUnitType_Mixer), componentSubType: OSType(kAudioUnitSubType_StereoMixer), componentManufacturer: OSType(kAudioUnitManufacturer_Apple), componentFlags: 0, componentFlagsMask: 0) 

     let b1 = AUGraphAddNode(graph, &mixerDesc, &mixerNode) 
     print("b1: \(b1)") 


     // Connect nodes 
     let y = AUGraphConnectNodeInput(graph, 
      mixerNode, 
      0, 
      outputNode, 
      0) 
     print("y: \(y)") 


     // open 
     let open = AUGraphOpen(graph) 
     print("graph should be open: \(open)") 


     let ufa = AUGraphNodeInfo(graph, mixerNode, nil, &mixerUnit); 
     print("ufa: \(ufa)") 

     let uf = AUGraphNodeInfo(graph, outputNode, nil, &outputUnit); 
     print("uf: \(uf)") 



     // output 
//  let w = AudioUnitSetProperty(outputUnit, 
//   kAudioUnitProperty_StreamFormat, 
//   kAudioUnitScope_Output, 
//   0, 
//   &desc, 
//   UInt32(sizeof(AudioStreamBasicDescription))) 
//  print("w: \(w)") 

//  let w2 = AudioUnitSetProperty(outputUnit, 
//   kAudioUnitProperty_StreamFormat, 
//   kAudioUnitScope_Input, 
//   0, 
//   &desc, 
//   UInt32(sizeof(AudioStreamBasicDescription))) 
//  print("w2: \(w2)") 
// 

     var numbuses:UInt32 = 1 


//  let gg = AudioUnitSetProperty(mixerUnit, 
//   kAudioUnitProperty_ElementCount, 
//   kAudioUnitScope_Input, 
//   0, 
//   &numbuses, 
//   UInt32(sizeof(UInt32))) 
//  print("gg: \(gg)") 

//    let s = sizeof(UInt32) 
//    numbuses = 1 
//    let gg2 = AudioUnitSetProperty(mixerUnit, 
//     kAudioUnitProperty_ElementCount, 
//     kAudioUnitScope_Output, 
//     0, 
//     &numbuses, 
//     UInt32(sizeof(UInt32))) 
//    print("gg2: \(gg2)") 


//  let m1 = AudioUnitSetProperty(mixerUnit, 
//   kAudioUnitProperty_StreamFormat, 
//   kAudioUnitScope_Output, 
//   0, 
//   &desc, 
//   UInt32(sizeof(AudioStreamBasicDescription))) 
//  print("m1: \(m1)") 


     numbuses = 1 
     for (var i:UInt32 = 0; i < numbuses; ++i) 
     { 
      // 



      // 
      let yy = AudioUnitSetParameter(mixerUnit, kMultiChannelMixerParam_Volume, kAudioUnitScope_Output, i, 1, 0); 
      print("yy: \(yy)") 

      let xx = AudioUnitSetParameter(mixerUnit, kMultiChannelMixerParam_Volume, kAudioUnitScope_Input, i, 1, 0); 
      print("xx: \(xx)") 

      let zz = AudioUnitSetParameter(mixerUnit, kMultiChannelMixerParam_Enable, kAudioUnitScope_Input, i, 1, 0); 
      print("zz: \(zz)") 

      let aa = AudioUnitSetParameter(mixerUnit, kMultiChannelMixerParam_Enable, kAudioUnitScope_Output, i, 1, 0); 
      print("aa: \(aa)") 

      var rcbs:AURenderCallbackStruct = AURenderCallbackStruct(inputProc: callback2, inputProcRefCon: &graph) 

      let result1 = AUGraphSetNodeInputCallback(graph, mixerNode, i, &rcbs) 
      print("result1: \(result1)") 




//   let sf1 = AudioUnitSetProperty(mixerUnit, 
//    kAudioUnitProperty_StreamFormat, 
//    kAudioUnitScope_Input, 
//    i, 
//    &desc, 
//    UInt32(sizeof(AudioStreamBasicDescription))) 
//   print("sf1: \(sf1)") 
// 
//   let sf2 = AudioUnitSetProperty(mixerUnit, 
//    kAudioUnitProperty_StreamFormat, 
//    kAudioUnitScope_Output, 
//    i, 
//    &desc, 
//    UInt32(sizeof(AudioStreamBasicDescription))) 
//   print("sf2: \(sf2)") 



     } 


//  let sf2 = AudioUnitSetProperty(mixerUnit, 
//   kAudioUnitProperty_StreamFormat, 
//   kAudioUnitScope_Output, 
//   0, 
//   &desc, 
//   UInt32(sizeof(AudioStreamBasicDescription))) 
//  print("sf2: \(sf2)") 
//   
//   
//  let o1 = AudioUnitSetProperty(outputUnit, 
//   kAudioUnitProperty_StreamFormat, 
//   kAudioUnitScope_Output, 
//   1, 
//   &desc, 
//   UInt32(sizeof(AudioStreamBasicDescription))) 
//  print("o1: \(o1)") 
//   



     let yu = AUGraphInitialize(graph) 
     print("yu: \(yu)") 


     let ee = AUGraphStart(graph) 
     print("ee: \(ee)") 




     CAShow(UnsafeMutablePointer(graph)) 
    } 


} 

My working Swift 3 sine-wave audio generator code is here: https://gist.github.com/hotpaw2/630a466cc830e3d129b9 – hotpaw2

Answers


You are asking for 2 channels per frame, but only providing enough bytes per packet for 1 channel. Check the error return values from your audio property setters.
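
In other words, the AudioStreamBasicDescription fields have to agree with each other: with interleaved Float32 stereo (mChannelsPerFrame = 2) and mFramesPerPacket = 1, both mBytesPerFrame and mBytesPerPacket must be 2 * sizeof(Float32) = 8 bytes, not 4. A minimal sketch of a self-consistent ASBD, written in the same Swift 2 style as the question (assuming AudioToolbox is imported as in the question; streamDesc and the sample rate are illustrative):

    var streamDesc = AudioStreamBasicDescription()
    streamDesc.mSampleRate       = 44100.0
    streamDesc.mFormatID         = kAudioFormatLinearPCM
    streamDesc.mFormatFlags      = kAudioFormatFlagsNativeFloatPacked
    streamDesc.mChannelsPerFrame = 2                              // interleaved stereo
    streamDesc.mBitsPerChannel   = UInt32(sizeof(Float32) * 8)    // 32 bits per sample
    streamDesc.mFramesPerPacket  = 1
    // one Float32 per channel per frame: 2 * 4 = 8 bytes
    streamDesc.mBytesPerFrame    = streamDesc.mChannelsPerFrame * UInt32(sizeof(Float32))
    streamDesc.mBytesPerPacket   = streamDesc.mBytesPerFrame * streamDesc.mFramesPerPacket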


Hi hotpaw2, could you please elaborate a bit further? Do I need to modify the following code, or something elsewhere: outStreamDesc.mChannelsPerFrame = 2; outStreamDesc.mFramesPerPacket = 1; outStreamDesc.mBitsPerChannel = UInt32(sizeof(Float32) * 8); outStreamDesc.mBytesPerPacket = UInt32(sizeof(Float32)); outStreamDesc.mBytesPerFrame = UInt32(sizeof(Float32)) – Chris


Yes, your ASBD is likely the problem. – hotpaw2
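
Following the "check the error return values" advice above, one way to confirm whether the ASBD is the issue is to set the stream format explicitly on the mixer's input bus and then read it back, printing the OSStatus of each call. This is only a sketch: it assumes mixerUnit is the AudioUnit obtained from AUGraphNodeInfo in the question, format is a self-consistent ASBD like the one sketched earlier, and applyAndVerifyFormat is an illustrative helper name, not part of the original code.

    // Hypothetical helper: apply `format` to the mixer's input bus 0 and report what the unit accepted.
    func applyAndVerifyFormat(mixerUnit: AudioUnit, format: AudioStreamBasicDescription)
    {
        var desc = format

        // A non-zero OSStatus here means the unit rejected the format.
        let setErr = AudioUnitSetProperty(mixerUnit,
            kAudioUnitProperty_StreamFormat,
            kAudioUnitScope_Input,
            0,
            &desc,
            UInt32(sizeof(AudioStreamBasicDescription)))
        print("set stream format: \(setErr)")

        // Read the format back to see what the unit actually ended up using.
        var actual = AudioStreamBasicDescription()
        var size = UInt32(sizeof(AudioStreamBasicDescription))
        let getErr = AudioUnitGetProperty(mixerUnit,
            kAudioUnitProperty_StreamFormat,
            kAudioUnitScope_Input,
            0,
            &actual,
            &size)
        print("get stream format: \(getErr), channels: \(actual.mChannelsPerFrame), bytesPerFrame: \(actual.mBytesPerFrame)")
    }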