diff --git a/mycroft/client/speech/main.py b/mycroft/client/speech/main.py
index 025e0727fe..619e523e1e 100644
--- a/mycroft/client/speech/main.py
+++ b/mycroft/client/speech/main.py
@@ -96,6 +96,13 @@ def handle_multi_utterance_intent_failure(event):
     mute_and_speak("Sorry, I didn't catch that. Please rephrase your request.")
 
 
+def _trigger_expect_response(message):
+    """
+    Makes mycroft start listening on 'recognizer_loop:audio_output_end'
+    """
+    create_signal('startListening')
+
+
 def handle_speak(event):
     global _last_stop_signal
 
@@ -104,7 +111,9 @@ def handle_speak(event):
     create_signal("isSpeaking")
 
     utterance = event.data['utterance']
-    expect_response = event.data.get('expect_response', False)
+
+    if event.data.get('expect_response', False):
+        ws.once('recognizer_loop:audio_output_end', _trigger_expect_response)
 
     # This is a bit of a hack for Picroft. The analog audio on a Pi blocks
     # for 30 seconds fairly often, so we don't want to break on periods
@@ -133,9 +142,6 @@ def handle_speak(event):
     # This check will clear the "signal" check
     check_for_signal("isSpeaking")
 
-    if expect_response:
-        create_signal('startListening')
-
 
 def handle_sleep(event):
     loop.sleep()
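
For context: previously handle_speak created the 'startListening' signal at the end of the handler, so Mycroft could begin listening before the spoken audio had actually finished. With this patch, handle_speak instead registers a one-shot bus handler (ws.once), and _trigger_expect_response creates the signal only when 'recognizer_loop:audio_output_end' fires. Below is a minimal sketch of a client exercising the new path; the WebsocketClient/Message import paths, the background thread, and the short sleep are assumptions made for illustration and are not part of this patch.

    import time
    from threading import Thread

    # Assumed mycroft-core import paths for the era this patch targets.
    from mycroft.messagebus.client.ws import WebsocketClient
    from mycroft.messagebus.message import Message

    ws = WebsocketClient()
    t = Thread(target=ws.run_forever)
    t.daemon = True
    t.start()
    time.sleep(1)  # crude wait for the bus connection; fine for a sketch

    # Emitting 'speak' with expect_response=True now causes the speech client
    # to register a one-shot 'recognizer_loop:audio_output_end' handler, so
    # the 'startListening' signal is created only after playback ends.
    ws.emit(Message('speak', {'utterance': 'What is your name?',
                              'expect_response': True}))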