
iOS Core Audio lifecycle - AVAudioIONodeImpl.mm:365 - required condition is false: hwFormat

I am developing an iOS application that has two main modules: an audio analysis module built on Core Audio, and an output module that uses AudioKit.

Here is the audio input class:

import AVFoundation 

typealias AudioInputCallback = (
    _ timeStamp: Double, 
    _ numberOfFrames: Int, 
    _ samples: [Float] 
    ) -> Void 

/// Sets up an audio input session and notifies when new buffer data is available. 
class AudioInputUtility: NSObject { 

    private(set) var audioUnit: AudioUnit! 
    var audioSession : AVAudioSession = AVAudioSession.sharedInstance() 
    var sampleRate: Float 
    var numberOfChannels: Int 

    /// When true, performs DC offset rejection on the incoming buffer before invoking the audioInputCallback. 
    var shouldPerformDCOffsetRejection: Bool = false 

    private let outputBus: UInt32 = 0 
    private let inputBus: UInt32 = 1 
    private var audioInputCallback: AudioInputCallback! 

    /// Instantiate a AudioInput. 
    /// - Parameter audioInputCallback: Invoked when audio data is available. 
    /// - Parameter sampleRate: The sample rate to set up the audio session with. 
    /// - Parameter numberOfChannels: The number of channels to set up the audio session with. 

    init(audioInputCallback callback: @escaping AudioInputCallback, sampleRate: Float = 44100.0, numberOfChannels: Int = 1) { // default values if not specified 

     self.sampleRate = sampleRate 
     self.numberOfChannels = numberOfChannels 
     audioInputCallback = callback 
    } 

    /// Start recording. Prompts for access to microphone if necessary. 
    func startRecording() { 
     do { 

      if self.audioUnit == nil { 
       setupAudioSession() 
       setupAudioUnit() 
      } 

      try self.audioSession.setActive(true) 
      var osErr: OSStatus = 0 


      osErr = AudioUnitInitialize(self.audioUnit) 
      assert(osErr == noErr, "*** AudioUnitInitialize err \(osErr)") 
      osErr = AudioOutputUnitStart(self.audioUnit) 

      assert(osErr == noErr, "*** AudioOutputUnitStart err \(osErr)") 
     } catch { 
      print("*** startRecording error: \(error)") 
     } 
    } 

    /// Stop recording. 
    func stopRecording() { 
     do { 
      var osErr: OSStatus = 0 

      osErr = AudioOutputUnitStop(self.audioUnit)
      assert(osErr == noErr, "*** AudioOutputUnitStop err \(osErr)")

      osErr = AudioUnitUninitialize(self.audioUnit)
      assert(osErr == noErr, "*** AudioUnitUninitialize err \(osErr)")

      try self.audioSession.setActive(false) 

     } catch { 
      print("*** error: \(error)") 
     } 
    } 

    private let recordingCallback: AURenderCallback = { (inRefCon, ioActionFlags, inTimeStamp, inBusNumber, inNumberFrames, ioData) -> OSStatus in 

     let audioInput = unsafeBitCast(inRefCon, to: AudioInputUtility.self) 
     var osErr: OSStatus = 0 

     // We've asked CoreAudio to allocate buffers for us, so just set mData to nil and it will be populated on AudioUnitRender(). 
     var bufferList = AudioBufferList(
      mNumberBuffers: 1, 
      mBuffers: AudioBuffer(
       mNumberChannels: UInt32(audioInput.numberOfChannels), 
       mDataByteSize: 4, 
       mData: nil)) 

     osErr = AudioUnitRender(audioInput.audioUnit, 
           ioActionFlags, 
           inTimeStamp, 
           inBusNumber, 
           inNumberFrames, 
           &bufferList) 
     assert(osErr == noErr, "*** AudioUnitRender err \(osErr)") 


     // Move samples from mData into our native [Float] format. 
     var monoSamples = [Float]() 
     let ptr = bufferList.mBuffers.mData?.assumingMemoryBound(to: Float.self) 
     monoSamples.append(contentsOf: UnsafeBufferPointer(start: ptr, count: Int(inNumberFrames))) 

     if audioInput.shouldPerformDCOffsetRejection { 
      DCRejectionFilterProcessInPlace(&monoSamples, count: Int(inNumberFrames)) 
     } 

     // Not compatible with Obj-C... 
     audioInput.audioInputCallback(inTimeStamp.pointee.mSampleTime/Double(audioInput.sampleRate), 
             Int(inNumberFrames), 
             monoSamples) 

     return 0 
    } 

    private func setupAudioSession() { 

     if !audioSession.availableCategories.contains(AVAudioSessionCategoryRecord) { 
      print("can't record! bailing.") 
      return 
     } 

     do { 

      //https://developer.apple.com/reference/avfoundation/avaudiosession/1669963-audio_session_categories 
      try audioSession.setCategory(AVAudioSessionCategoryRecord) 

      // "Appropriate for applications that wish to minimize the effect of system-supplied signal processing for input and/or output audio signals." 
      // NB: This turns off the high-pass filter that CoreAudio normally applies. 


      try audioSession.setMode(AVAudioSessionModeMeasurement) 

      try audioSession.setPreferredSampleRate(Double(sampleRate)) 

      // NB: This is considered a 'hint' and more often than not is just ignored. 

      // Preferred I/O buffer duration in seconds -> want roughly 1024 samples per callback
      try audioSession.setPreferredIOBufferDuration(0.05)

      audioSession.requestRecordPermission { (granted) -> Void in 
       if !granted { 
        print("*** record permission denied") 
       } 
      } 
     } catch { 
      print("*** audioSession error: \(error)") 
     } 
    } 

    private func setupAudioUnit() { 

     var componentDesc:AudioComponentDescription = AudioComponentDescription(
      componentType: OSType(kAudioUnitType_Output), 
      componentSubType: OSType(kAudioUnitSubType_RemoteIO), // Always this for iOS. 
      componentManufacturer: OSType(kAudioUnitManufacturer_Apple), 
      componentFlags: 0, 
      componentFlagsMask: 0) 

     var osErr: OSStatus = 0 

     // Get an audio component matching our description. 
     let component: AudioComponent! = AudioComponentFindNext(nil, &componentDesc) 
     assert(component != nil, "Couldn't find a default component") 

     // Create an instance of the AudioUnit 
     var tempAudioUnit: AudioUnit? 
     osErr = AudioComponentInstanceNew(component, &tempAudioUnit) 
     self.audioUnit = tempAudioUnit 

     assert(osErr == noErr, "*** AudioComponentInstanceNew err \(osErr)") 

     // Enable I/O for input. 
     var one:UInt32 = 1 

     osErr = AudioUnitSetProperty(audioUnit, 
            kAudioOutputUnitProperty_EnableIO, 
            kAudioUnitScope_Input, 
            inputBus, 
            &one, 
            UInt32(MemoryLayout<UInt32>.size)) 
     assert(osErr == noErr, "*** AudioUnitSetProperty err \(osErr)") 


     osErr = AudioUnitSetProperty(audioUnit, 
            kAudioOutputUnitProperty_EnableIO, 
            kAudioUnitScope_Output, 
            outputBus, 
            &one, 
            UInt32(MemoryLayout<UInt32>.size)) 
     assert(osErr == noErr, "*** AudioUnitSetProperty err \(osErr)") 


     // Set format to 32 bit, floating point, linear PCM 
     var streamFormatDesc:AudioStreamBasicDescription = AudioStreamBasicDescription(
      mSampleRate:  Double(sampleRate), 
      mFormatID:   kAudioFormatLinearPCM, 
      mFormatFlags:  kAudioFormatFlagsNativeFloatPacked | kAudioFormatFlagIsNonInterleaved, // floating point data - docs say this is fastest 
      mBytesPerPacket: 4, 
      mFramesPerPacket: 1, 
      mBytesPerFrame:  4, 
      mChannelsPerFrame: UInt32(self.numberOfChannels), 
      mBitsPerChannel: 4 * 8, 
      mReserved: 0 
     ) 

     // Set format for input and output busses 

     osErr = AudioUnitSetProperty(audioUnit, 
            kAudioUnitProperty_StreamFormat, 
            kAudioUnitScope_Input, outputBus, 
            &streamFormatDesc, 
            UInt32(MemoryLayout<AudioStreamBasicDescription>.size)) 
     assert(osErr == noErr, "*** AudioUnitSetProperty err \(osErr)") 


     osErr = AudioUnitSetProperty(audioUnit, 
            kAudioUnitProperty_StreamFormat, 
            kAudioUnitScope_Output, 
            inputBus, 
            &streamFormatDesc, 
            UInt32(MemoryLayout<AudioStreamBasicDescription>.size)) 
     assert(osErr == noErr, "*** AudioUnitSetProperty err \(osErr)") 

     // Set up our callback. 
     var inputCallbackStruct = AURenderCallbackStruct(inputProc: recordingCallback, inputProcRefCon: UnsafeMutableRawPointer(Unmanaged.passUnretained(self).toOpaque())) 
     osErr = AudioUnitSetProperty(audioUnit, 
            AudioUnitPropertyID(kAudioOutputUnitProperty_SetInputCallback), 
            AudioUnitScope(kAudioUnitScope_Global), 
            inputBus, 
            &inputCallbackStruct, 
            UInt32(MemoryLayout<AURenderCallbackStruct>.size)) 
     assert(osErr == noErr, "*** AudioUnitSetProperty err \(osErr)") 

     // Ask CoreAudio to allocate buffers for us on render. (This is true by default but just to be explicit about it...) 
     osErr = AudioUnitSetProperty(audioUnit, 
            AudioUnitPropertyID(kAudioUnitProperty_ShouldAllocateBuffer), 
            AudioUnitScope(kAudioUnitScope_Output), 
            inputBus, 
            &one, 
            UInt32(MemoryLayout<UInt32>.size)) 
     assert(osErr == noErr, "*** AudioUnitSetProperty err \(osErr)") 
    } 
} 

private func DCRejectionFilterProcessInPlace(_ audioData: inout [Float], count: Int) { 

    let defaultPoleDist: Float = 0.975 
    var mX1: Float = 0 
    var mY1: Float = 0 

    for i in 0..<count { 
     let xCurr: Float = audioData[i] 
     audioData[i] = audioData[i] - mX1 + (defaultPoleDist * mY1) 
     mX1 = xCurr 
     mY1 = audioData[i] 
    } 
} 
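
For reference, this is roughly how the class is driven from the app side (a minimal sketch; the variable name and the callback body are only illustrative):

let audioInput = AudioInputUtility(audioInputCallback: { timeStamp, numberOfFrames, samples in
    // samples is the mono [Float] buffer for this render cycle; hand it to the analysis code here.
    print("t=\(timeStamp), frames=\(numberOfFrames)")
}, sampleRate: 44100.0, numberOfChannels: 1)

audioInput.shouldPerformDCOffsetRejection = true
audioInput.startRecording()
// ... later ...
audioInput.stopRecording()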

And here is the output class:

private func initPlayer() {
    do {
        /*
        let audioSession: AVAudioSession = AVAudioSession.sharedInstance()
        //try audioSession.setActive(false)
        try audioSession.setCategory(AVAudioSessionCategoryPlayback)
        */

        // http://audiokit.io/playgrounds/Playback/Reading%20and%20Writing%20Audio%20Files/
        let file = try AKAudioFile(readFileName: self.soundPath,
                                   baseDir: .resources)

        self.player = try AKAudioPlayer(file: file)

        // player options
        self.player!.looping = true

        AKSettings.playbackWhileMuted = true
        try AKSettings.setSession(category: .playback)
        AudioKit.output = self.player

    } catch {
        print("Unresolved error \(error)")
    }
}


public func stopMaskingSound() {

    if player!.isPlaying {
        self.player!.stop()
    }

    if audioKitIsStarted {
        AudioKit.stop()
        self.audioKitIsStarted = false
    }
}

As you can see, audio input and output are managed by two different classes.

The problem occurs when I go through this sequence: 1) init the player and record -> 2) stop that and play the output -> stop it again -> 3) re-init the player. At the third step I get this exception:

[central] 54: ERROR: [0x16dfc3000] >avae> AVAudioIONodeImpl.mm:365: _GetHWFormat: required condition is false: hwFormat 
*** Terminating app due to uncaught exception 'com.apple.coreaudio.avfaudio', reason: 'required condition is false: hwFormat' 
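
Expressed against the two classes above, the failing sequence looks roughly like this (a sketch only; audioInput stands for the AudioInputUtility instance, and the AudioKit.start()/play() calls are assumed to happen in the output class):

// 1) init the player and start recording/analysis
initPlayer()
audioInput.startRecording()

// 2) stop recording and play the output
audioInput.stopRecording()
AudioKit.start()          // audioKitIsStarted = true
player!.play()

// ... then stop playback again
stopMaskingSound()

// 3) re-init the player -> AVAudioIONodeImpl.mm:365: required condition is false: hwFormat
initPlayer()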

Does anyone know what is going on? Is there a lifecycle problem between AudioKit and Core Audio?

Answer:


Stopping and restarting audio units can be problematic because parts of the audio process actually stop on a different thread. One possible workaround is to allow a delay of roughly one second between the stop and the restart, so that RemoteIO can finish stopping asynchronously before it is started again.
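
A minimal sketch of that workaround, assuming the methods shown in the question and calling from the output class (audioInput stands for the AudioInputUtility instance; the one-second delay is only a starting point):

// Stop the recording unit first, then give RemoteIO time to finish tearing
// down on its own thread before the AudioKit player is re-initialized.
audioInput.stopRecording()

DispatchQueue.main.asyncAfter(deadline: .now() + 1.0) { [weak self] in
    // By now the remote I/O unit should have stopped asynchronously.
    self?.initPlayer()
    AudioKit.start()
    self?.player?.play()
}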


The problem is more deterministic than that... I get this kind of error whenever I try to go back to the analysis part. I also receive it if I don't close the headset. What exactly is hwFormat?