Speech recognition: crashing when quickly starting and ending?

I have implemented speech recognition so the app can be operated by voice commands.

However, when the user starts and stops speech recognition repeatedly within a short time (less than a minute), the app crashes with:

libc++abi.dylib: terminating with uncaught exception of type NSException

The crash only happens when the user does this very quickly; starting and stopping at a normal pace works fine. I am not able to figure out the reason.
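
For context, start and stop are driven from the UI roughly like this (an illustrative sketch only; micButtonTapped and isListening are placeholder names, not my actual code):

// Placeholder wiring, not the real code: a toggle button calls the methods below.
private var isListening = false

@objc private func micButtonTapped(_ sender: UIButton) {
    if isListening {
        endListening()
    } else {
        startListening()
    }
    isListening.toggle()
}
// Tapping this several times within a few seconds reproduces the NSException crash.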



Code snippet

Start speech recognition:

/// Start voice recognition
func startListening() {
    if AVAudioSession.sharedInstance().category == AVAudioSessionCategorySoloAmbient {
        let audioSession = AVAudioSession.sharedInstance()
        do {
            try audioSession.setCategory(AVAudioSessionCategoryRecord)
            try audioSession.setMode(AVAudioSessionModeMeasurement)
            try audioSession.setActive(true, with: .notifyOthersOnDeactivation)
        } catch {
            print("audioSession properties weren't set because of an error.")
        }
    }

    audioEngine = AVAudioEngine()

    if !(audioEngine?.isRunning)! {
        startRecording()
        //print("start listening............")
    }
}



/// Start recording/listening
private func startRecording() {
    guard let audio_engine = self.audioEngine else { return }

    if recognitionTask != nil {
        recognitionTask?.cancel()
        recognitionTask = nil
    }

    recognitionRequest = SFSpeechAudioBufferRecognitionRequest()
    let inputNode = audio_engine.inputNode

    guard let recognitionRequest = recognitionRequest else {
        // fatalError("Unable to create an SFSpeechAudioBufferRecognitionRequest object")
        print("fatalError : Unable to create an SFSpeechAudioBufferRecognitionRequest object")
        return
    }
    recognitionRequest.shouldReportPartialResults = true
    recognitionTask = speechRecognizer?.recognitionTask(with: recognitionRequest, resultHandler: { (result, error) in
        var isFinal = false
        if result != nil {
            if let transcript = result?.bestTranscription.formattedString {
                self.speechResponse!(transcript, .authorized, nil)
            }
            isFinal = (result?.isFinal)!
        }
        if error != nil || isFinal {
            audio_engine.stop()
            inputNode.removeTap(onBus: 0)
            self.recognitionRequest = nil
            self.recognitionTask = nil
            if self.isContinueListening == true {
                self.startRecording()
            }
        }
    })

    let recordingFormat = inputNode.outputFormat(forBus: 0)
    inputNode.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { (buffer, when) in
        self.recognitionRequest?.append(buffer)
    }
    audio_engine.prepare()
    do {
        try audio_engine.start()
    } catch {
        print("audioEngine couldn't start because of an error")
    }
}


Stop speech recognition:

/// End voice recognition
func endListening() {
    if AVAudioSession.sharedInstance().category != AVAudioSessionCategorySoloAmbient {
        try? AVAudioSession.sharedInstance().setCategory(AVAudioSessionCategorySoloAmbient)
        try? AVAudioSession.sharedInstance().setActive(true)
    }
    if (audioEngine?.isRunning ?? false) {
        audioEngine?.stop()
        recognitionRequest?.endAudio()
        if recognitionTask != nil {
            recognitionTask?.cancel()
            recognitionTask = nil
        }
        recognitionRequest = nil
        audioEngine = nil
        // print("end listening............")
    }
}
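
For completeness, the properties these methods use are declared roughly like this (a sketch; the exact declarations, and in particular the speechResponse closure type, are inferred from the call sites rather than copied from my code):

// At the top of the file:
import AVFoundation
import Speech

// Inside the same class as startListening()/endListening():
private var audioEngine: AVAudioEngine?
private var recognitionRequest: SFSpeechAudioBufferRecognitionRequest?
private var recognitionTask: SFSpeechRecognitionTask?
private let speechRecognizer = SFSpeechRecognizer(locale: Locale(identifier: "en-US"))
var isContinueListening = false
// Closure signature guessed from the call self.speechResponse!(transcript, .authorized, nil)
var speechResponse: ((String, SFSpeechRecognizerAuthorizationStatus, Error?) -> Void)?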


I even tried adding a small delay between stopping and restarting, but no luck.
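
Roughly what that delay attempt looked like (an illustrative sketch, not the exact code; restartListening is just a placeholder wrapper and 0.5 s is an example value):

// Placeholder wrapper around the two methods above:
// stop, then wait briefly before starting again, hoping the audio session settles.
func restartListening() {
    endListening()
    DispatchQueue.main.asyncAfter(deadline: .now() + 0.5) { [weak self] in
        self?.startListening()
    }
}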



Thanks in advance!

ios swift speech-recognition speech