Issues-4 - Fix pep8 errors.

pull/16/head
Leo Arias 2016-05-20 22:15:53 +00:00
parent 9aa9f47f72
commit d618676089
69 changed files with 852 additions and 424 deletions

View File

@ -12,4 +12,6 @@ install:
- pip install -r requirements.txt
- pip install -r test-requirements.txt
# command to run tests
script: "PYTHONPATH=. python test/test_runner.py --fail-on-error"
script:
- pep8 mycroft test
- "PYTHONPATH=. python test/test_runner.py --fail-on-error"

View File

@ -50,4 +50,4 @@ def main():
if __name__ == "__main__":
main()
main()

View File

@ -1,6 +1,11 @@
from setuptools import setup
from mycroft.util.setup_base import find_all_packages, required, get_version, place_manifest
from mycroft.util.setup_base import (
find_all_packages,
required,
get_version,
place_manifest
)
__author__ = 'seanfitz'

View File

@ -10,7 +10,8 @@ class EnclosureAPI:
"""
This API is intended to be used to control Mycroft hardware capabilities.
It exposes all possible enclosure commands to be performed by a Mycroft unit.
It exposes all possible enclosure commands to be performed by a Mycroft
unit.
"""
def __init__(self, client):
@ -23,7 +24,8 @@ class EnclosureAPI:
self.client.emit(Message("enclosure.system.unmute"))
def system_blink(self, times):
self.client.emit(Message("enclosure.system.blink", metadata={'times': times}))
self.client.emit(
Message("enclosure.system.blink", metadata={'times': times}))
def eyes_on(self):
self.client.emit(Message("enclosure.eyes.on"))
@ -32,19 +34,23 @@ class EnclosureAPI:
self.client.emit(Message("enclosure.eyes.off"))
def eyes_blink(self, side):
self.client.emit(Message("enclosure.eyes.blink", metadata={'side': side}))
self.client.emit(
Message("enclosure.eyes.blink", metadata={'side': side}))
def eyes_narrow(self):
self.client.emit(Message("enclosure.eyes.narrow"))
def eyes_look(self, side):
self.client.emit(Message("enclosure.eyes.look", metadata={'side': side}))
self.client.emit(
Message("enclosure.eyes.look", metadata={'side': side}))
def eyes_color(self, r=255, g=255, b=255):
self.client.emit(Message("enclosure.eyes.color", metadata={'r': r, 'g': g, 'b': b}))
self.client.emit(
Message("enclosure.eyes.color", metadata={'r': r, 'g': g, 'b': b}))
def eyes_brightness(self, level=30):
self.client.emit(Message("enclosure.eyes.level", metadata={'level': level}))
self.client.emit(
Message("enclosure.eyes.level", metadata={'level': level}))
def mouth_reset(self):
self.client.emit(Message("enclosure.mouth.reset"))
@ -62,4 +68,5 @@ class EnclosureAPI:
self.client.emit(Message("enclosure.mouth.smile"))
def mouth_text(self, text=""):
self.client.emit(Message("enclosure.mouth.text", metadata={'text': text}))
self.client.emit(
Message("enclosure.mouth.text", metadata={'text': text}))

View File

@ -21,7 +21,8 @@ class EnclosureReader(Thread):
"""
Reads data from Serial port.
Listens to all commands sent by Arduino that must be be performed on Mycroft Core.
Listens to all commands sent by Arduino that must be be performed on
Mycroft Core.
E.g. Mycroft Stop Feature
#. Arduino sends a Stop command after a button press on a Mycroft unit
@ -62,7 +63,8 @@ class EnclosureReader(Thread):
class EnclosureWriter(Thread):
"""
Writes data to Serial port.
#. Enqueues all commands received from Mycroft enclosures implementation
#. Enqueues all commands received from Mycroft enclosures
implementation
#. Process them on the received order by writing on the Serial port
E.g. Displaying a text on Mycroft's Mouth
@ -109,7 +111,8 @@ class Enclosure:
E.g. ``EnclosureEyes``, ``EnclosureMouth`` and ``EnclosureArduino``
It also listens to the basis events in order to perform those core actions on the unit.
It also listens to the basis events in order to perform those core actions
on the unit.
E.g. Start and Stop talk animation
"""
@ -130,10 +133,14 @@ class Enclosure:
self.port = self.config.get("port")
self.rate = int(self.config.get("rate"))
self.timeout = int(self.config.get("timeout"))
self.serial = serial.serial_for_url(url=self.port, baudrate=self.rate, timeout=self.timeout)
LOGGER.info("Connected to: " + self.port + " rate: " + str(self.rate) + " timeout: " + str(self.timeout))
self.serial = serial.serial_for_url(
url=self.port, baudrate=self.rate, timeout=self.timeout)
LOGGER.info(
"Connected to: " + self.port + " rate: " + str(self.rate) +
" timeout: " + str(self.timeout))
except:
LOGGER.error("It is not possible to connect to serial port: " + self.port)
LOGGER.error(
"It is not possible to connect to serial port: " + self.port)
raise
def __init_events(self):

View File

@ -9,7 +9,9 @@ from speech_recognition import AudioData
from mycroft.client.speech import wakeword_recognizer
from mycroft.client.speech.mic import MutableMicrophone, Recognizer
from mycroft.client.speech.recognizer_wrapper import RemoteRecognizerWrapperFactory
from mycroft.client.speech.recognizer_wrapper import (
RemoteRecognizerWrapperFactory
)
from mycroft.configuration.config import ConfigurationManager
from mycroft.messagebus.message import Message
from mycroft.metrics import MetricsAggregator, Stopwatch
@ -26,8 +28,8 @@ speech_config = ConfigurationManager.get_config().get('speech_client')
class AudioProducer(threading.Thread):
"""
AudioProducer
given a mic and a recognizer implementation, continuously listens to the mic for
potential speech chunks and pushes them onto the queue.
given a mic and a recognizer implementation, continuously listens to the
mic for potential speech chunks and pushes them onto the queue.
"""
def __init__(self, state, queue, mic, recognizer, emitter):
threading.Thread.__init__(self)
@ -47,8 +49,9 @@ class AudioProducer(threading.Thread):
audio = self.recognizer.listen(source)
self.queue.put(audio)
except IOError, ex:
# NOTE: Audio stack on raspi is slightly different, throws IOError every other listen,
# almost like it can't handle buffering audio between listen loops.
# NOTE: Audio stack on raspi is slightly different, throws
# IOError every other listen, almost like it can't handle
# buffering audio between listen loops.
# The internet was not helpful.
# http://stackoverflow.com/questions/10733903/pyaudio-input-overflowed
self.emitter.emit("recognizer_loop:ioerror", ex)
@ -58,13 +61,18 @@ class WakewordExtractor:
MAX_ERROR_SECONDS = 0.02
TRIM_SECONDS = 0.1
PUSH_BACK_SECONDS = 0.2 # The seconds the safe end position is pushed back to ensure pocketsphinx is consistent
SILENCE_SECONDS = 0.2 # The seconds of silence padded where the wakeword was removed
# The seconds the safe end position is pushed back to ensure pocketsphinx
# is consistent
PUSH_BACK_SECONDS = 0.2
# The seconds of silence padded where the wakeword was removed
SILENCE_SECONDS = 0.2
def __init__(self, audio_data, recognizer, metrics):
self.audio_data = audio_data
self.recognizer = recognizer
self.silence_data = self.__generate_silence(self.SILENCE_SECONDS, self.audio_data.sample_rate, self.audio_data.sample_width)
self.silence_data = self.__generate_silence(
self.SILENCE_SECONDS, self.audio_data.sample_rate,
self.audio_data.sample_width)
self.wav_data = self.audio_data.get_wav_data()
self.AUDIO_SIZE = float(len(self.wav_data))
self.range = self.Range(0, self.AUDIO_SIZE / 2)
@ -101,15 +109,18 @@ class WakewordExtractor:
return False
def audio_pos(self, raw_pos):
return int(self.audio_data.sample_width * round(float(raw_pos)/self.audio_data.sample_width))
return int(self.audio_data.sample_width *
round(float(raw_pos)/self.audio_data.sample_width))
def get_audio_segment(self, begin, end):
return self.wav_data[self.audio_pos(begin) : self.audio_pos(end)]
return self.wav_data[self.audio_pos(begin): self.audio_pos(end)]
def __calculate_marker(self, use_begin, sign_if_found, range, delta):
while 2 * delta >= self.MAX_ERROR_SECONDS * self.audio_data.sample_rate * self.audio_data.sample_width:
while (2 * delta >= self.MAX_ERROR_SECONDS *
self.audio_data.sample_rate * self.audio_data.sample_width):
byte_data = self.get_audio_segment(range.begin, range.end)
found = self.__found_in_segment("mycroft", byte_data, self.recognizer, self.metrics)
found = self.__found_in_segment(
"mycroft", byte_data, self.recognizer, self.metrics)
sign = sign_if_found if found else -sign_if_found
range.add_to_marker(use_begin, delta * sign)
delta /= 2
@ -117,26 +128,37 @@ class WakewordExtractor:
def calculate_range(self):
delta = self.AUDIO_SIZE / 4
self.range.end = self.__calculate_marker(False, -1, self.Range(0, self.AUDIO_SIZE / 2), delta)
self.range.end = self.__calculate_marker(
False, -1, self.Range(0, self.AUDIO_SIZE / 2), delta)
# Ensures the end position is well past the wakeword part of the audio
pos_end_safe = min(self.AUDIO_SIZE, self.range.end + self.PUSH_BACK_SECONDS * self.audio_data.sample_rate * self.audio_data.sample_width)
pos_end_safe = min(
self.AUDIO_SIZE, self.range.end + self.PUSH_BACK_SECONDS *
self.audio_data.sample_rate * self.audio_data.sample_width)
delta = pos_end_safe / 4
begin = pos_end_safe / 2
self.range.begin = self.__calculate_marker(True, 1, self.Range(begin, pos_end_safe), delta)
self.range.narrow(self.TRIM_SECONDS * self.audio_data.sample_rate * self.audio_data.sample_width)
self.range.begin = self.__calculate_marker(
True, 1, self.Range(begin, pos_end_safe), delta)
self.range.narrow(self.TRIM_SECONDS * self.audio_data.sample_rate *
self.audio_data.sample_width)
@staticmethod
def __generate_silence(seconds, sample_rate, sample_width):
return '\0'*int(seconds * sample_rate * sample_width)
def get_audio_data_before(self):
byte_data = self.get_audio_segment(0, self.range.begin) + self.silence_data
return AudioData(byte_data, self.audio_data.sample_rate,self.audio_data.sample_width)
byte_data = self.get_audio_segment(
0, self.range.begin) + self.silence_data
return AudioData(
byte_data, self.audio_data.sample_rate,
self.audio_data.sample_width)
def get_audio_data_after(self):
byte_data = self.silence_data + self.get_audio_segment(self.range.end, self.AUDIO_SIZE)
return AudioData(byte_data, self.audio_data.sample_rate,self.audio_data.sample_width)
byte_data = self.silence_data + self.get_audio_segment(
self.range.end, self.AUDIO_SIZE)
return AudioData(
byte_data, self.audio_data.sample_rate,
self.audio_data.sample_width)
class AudioConsumer(threading.Thread):
@ -145,10 +167,13 @@ class AudioConsumer(threading.Thread):
Consumes AudioData chunks off the queue
"""
MIN_AUDIO_SIZE = 1.0 # In seconds, the minimum audio size to be sent to remote STT
# In seconds, the minimum audio size to be sent to remote STT
MIN_AUDIO_SIZE = 1.0
def __init__(self, state, queue, emitter, wakeup_recognizer, wakeword_recognizer,
wrapped_remote_recognizer, wakeup_prefixes, wakeup_words):
def __init__(
self, state, queue, emitter, wakeup_recognizer,
wakeword_recognizer, wrapped_remote_recognizer, wakeup_prefixes,
wakeup_words):
threading.Thread.__init__(self)
self.daemon = True
self.queue = queue
@ -167,17 +192,20 @@ class AudioConsumer(threading.Thread):
@staticmethod
def _audio_length(audio):
return float(len(audio.frame_data))/(audio.sample_rate*audio.sample_width)
return float(
len(audio.frame_data))/(audio.sample_rate*audio.sample_width)
def try_consume_audio(self):
timer = Stopwatch()
hyp = None
audio = self.queue.get()
self.metrics.timer("mycroft.recognizer.audio.length_s", self._audio_length(audio))
self.metrics.timer(
"mycroft.recognizer.audio.length_s", self._audio_length(audio))
self.queue.task_done()
timer.start()
if self.state.sleeping:
hyp = self.wakeup_recognizer.transcribe(audio.get_wav_data(), metrics=self.metrics)
hyp = self.wakeup_recognizer.transcribe(
audio.get_wav_data(), metrics=self.metrics)
if hyp and hyp.hypstr:
logger.debug("sleeping recognition: " + hyp.hypstr)
if hyp and hyp.hypstr.lower().find("wake up") >= 0:
@ -187,17 +215,24 @@ class AudioConsumer(threading.Thread):
self.metrics.increment("mycroft.wakeup")
else:
if not self.state.skip_wakeword:
hyp = self.ww_recognizer.transcribe(audio.get_wav_data(), metrics=self.metrics)
hyp = self.ww_recognizer.transcribe(
audio.get_wav_data(), metrics=self.metrics)
if hyp and hyp.hypstr.lower().find("mycroft") >= 0:
extractor = WakewordExtractor(audio, self.ww_recognizer, self.metrics)
extractor = WakewordExtractor(
audio, self.ww_recognizer, self.metrics)
timer.lap()
extractor.calculate_range()
self.metrics.timer("mycroft.recognizer.extractor.time_s", timer.lap())
self.metrics.timer(
"mycroft.recognizer.extractor.time_s", timer.lap())
audio_before = extractor.get_audio_data_before()
self.metrics.timer("mycroft.recognizer.audio_extracted.length_s", self._audio_length(audio_before))
self.metrics.timer(
"mycroft.recognizer.audio_extracted.length_s",
self._audio_length(audio_before))
audio_after = extractor.get_audio_data_after()
self.metrics.timer("mycroft.recognizer.audio_extracted.length_s", self._audio_length(audio_after))
self.metrics.timer(
"mycroft.recognizer.audio_extracted.length_s",
self._audio_length(audio_after))
SessionManager.touch()
payload = {
@ -220,7 +255,8 @@ class AudioConsumer(threading.Thread):
try:
self.transcribe([audio])
except sr.UnknownValueError:
logger.warn("Speech Recognition could not understand audio")
logger.warn(
"Speech Recognition could not understand audio")
self.__speak("Sorry, I didn't catch that.")
self.metrics.increment("mycroft.recognizer.error")
self.state.skip_wakeword = False
@ -230,27 +266,36 @@ class AudioConsumer(threading.Thread):
def __speak(self, utterance):
"""
Speak commands should be asynchronous to avoid filling up the portaudio buffer.
Speak commands should be asynchronous to avoid filling up the
portaudio buffer.
:param utterance:
:return:
"""
def target():
self.emitter.emit("speak", Message("speak", metadata={'utterance': utterance,
'session': SessionManager.get().session_id}))
self.emitter.emit(
"speak",
Message("speak",
metadata={'utterance': utterance,
'session': SessionManager.get().session_id}))
threading.Thread(target=target).start()
def _create_remote_stt_runnable(self, audio, utterances):
def runnable():
try:
text = self.wrapped_remote_recognizer.transcribe(audio, metrics=self.metrics).lower()
text = self.wrapped_remote_recognizer.transcribe(
audio, metrics=self.metrics).lower()
except sr.UnknownValueError:
pass
except sr.RequestError as e:
logger.error("Could not request results from Speech Recognition service; {0}".format(e))
logger.error(
"Could not request results from Speech Recognition "
"service; {0}".format(e))
except CerberusAccessDenied as e:
logger.error("AccessDenied from Cerberus proxy.")
self.__speak("Your device is not registered yet. To start pairing, login at cerberus.mycroft.ai")
self.__speak(
"Your device is not registered yet. To start pairing, "
"login at cerberus.mycroft.ai")
utterances.append("pair my device")
else:
logger.debug("STT: " + text)
@ -297,15 +342,20 @@ class RecognizerLoop(pyee.EventEmitter):
device_index=None,
lang=core_config.get('lang')):
pyee.EventEmitter.__init__(self)
self.microphone = MutableMicrophone(sample_rate=sample_rate, device_index=device_index)
self.microphone = MutableMicrophone(
sample_rate=sample_rate, device_index=device_index)
self.microphone.CHANNELS = channels
self.ww_recognizer = wakeword_recognizer.create_recognizer(samprate=sample_rate, lang=lang)
self.wakeup_recognizer = wakeword_recognizer.create_recognizer(samprate=sample_rate, lang=lang,
keyphrase="wake up mycroft") # TODO - localization
self.ww_recognizer = wakeword_recognizer.create_recognizer(
samprate=sample_rate, lang=lang)
self.wakeup_recognizer = wakeword_recognizer.create_recognizer(
samprate=sample_rate, lang=lang,
keyphrase="wake up mycroft") # TODO - localization
self.remote_recognizer = Recognizer()
basedir = os.path.dirname(__file__)
self.wakeup_words = read_stripped_lines(os.path.join(basedir, 'model', lang, 'WakeUpWord.voc'))
self.wakeup_prefixes = read_stripped_lines(os.path.join(basedir, 'model', lang, 'PrefixWakeUp.voc'))
self.wakeup_words = read_stripped_lines(os.path.join(
basedir, 'model', lang, 'WakeUpWord.voc'))
self.wakeup_prefixes = read_stripped_lines(
os.path.join(basedir, 'model', lang, 'PrefixWakeUp.voc'))
self.state = RecognizerLoopState()
def start_async(self):
@ -321,7 +371,8 @@ class RecognizerLoop(pyee.EventEmitter):
self,
self.wakeup_recognizer,
self.ww_recognizer,
RemoteRecognizerWrapperFactory.wrap_recognizer(self.remote_recognizer),
RemoteRecognizerWrapperFactory.wrap_recognizer(
self.remote_recognizer),
self.wakeup_prefixes,
self.wakeup_words).start()

View File

@ -16,6 +16,7 @@ loop = None
config = ConfigurationManager.get_config()
def handle_listening():
logger.info("Listening...")
client.emit(Message('recognizer_loop:listening'))
@ -79,7 +80,9 @@ def main():
loop.on('recognizer_loop:utterance', handle_utterance)
loop.on('speak', handle_speak)
client.on('speak', handle_speak)
client.on('multi_utterance_intent_failure', handle_multi_utterance_intent_failure)
client.on(
'multi_utterance_intent_failure',
handle_multi_utterance_intent_failure)
client.on('recognizer_loop:sleep', handle_sleep)
client.on('recognizer_loop:wake_up', handle_wake_up)
event_thread = Thread(target=connect)

View File

@ -4,7 +4,12 @@ import audioop
from time import sleep
import pyaudio
from speech_recognition import Microphone, AudioSource, WaitTimeoutError, AudioData
from speech_recognition import (
Microphone,
AudioSource,
WaitTimeoutError,
AudioData
)
import speech_recognition
from mycroft.util.log import getLogger
logger = getLogger(__name__)
@ -13,7 +18,7 @@ __author__ = 'seanfitz'
class MutableStream(object):
def __init__(self, wrapped_stream, format, muted=False):
assert wrapped_stream != None
assert wrapped_stream is not None
self.wrapped_stream = wrapped_stream
self.muted = muted
self.SAMPLE_WIDTH = pyaudio.get_sample_size(format)
@ -57,17 +62,21 @@ class MutableStream(object):
class MutableMicrophone(Microphone):
def __init__(self, device_index = None, sample_rate = 16000, chunk_size = 1024):
Microphone.__init__(self, device_index=device_index, sample_rate=sample_rate, chunk_size=chunk_size)
def __init__(self, device_index=None, sample_rate=16000, chunk_size=1024):
Microphone.__init__(
self, device_index=device_index, sample_rate=sample_rate,
chunk_size=chunk_size)
self.muted = False
def __enter__(self):
assert self.stream is None, "This audio source is already inside a context manager"
assert self.stream is None, \
"This audio source is already inside a context manager"
self.audio = pyaudio.PyAudio()
self.stream = MutableStream(self.audio.open(
input_device_index = self.device_index, channels = 1,
format = self.format, rate = self.SAMPLE_RATE, frames_per_buffer = self.CHUNK,
input = True, # stream is an input stream
input_device_index=self.device_index, channels=1,
format=self.format, rate=self.SAMPLE_RATE,
frames_per_buffer=self.CHUNK,
input=True, # stream is an input stream
), self.format, self.muted)
return self
@ -94,45 +103,71 @@ class Recognizer(speech_recognition.Recognizer):
speech_recognition.Recognizer.__init__(self)
self.max_audio_length_sec = 30
def listen(self, source, timeout = None):
def listen(self, source, timeout=None):
"""
Records a single phrase from ``source`` (an ``AudioSource`` instance) into an ``AudioData`` instance, which it returns.
Records a single phrase from ``source`` (an ``AudioSource`` instance)
into an ``AudioData`` instance, which it returns.
This is done by waiting until the audio has an energy above ``recognizer_instance.energy_threshold`` (the user has started speaking), and then recording until it encounters ``recognizer_instance.pause_threshold`` seconds of non-speaking or there is no more audio input. The ending silence is not included.
This is done by waiting until the audio has an energy above
``recognizer_instance.energy_threshold`` (the user has started
speaking), and then recording until it encounters
``recognizer_instance.pause_threshold`` seconds of non-speaking or
there is no more audio input. The ending silence is not included.
The ``timeout`` parameter is the maximum number of seconds that it will wait for a phrase to start before giving up and throwing an ``speech_recognition.WaitTimeoutError`` exception. If ``timeout`` is ``None``, it will wait indefinitely.
The ``timeout`` parameter is the maximum number of seconds that it
will wait for a phrase to start before giving up and throwing an
``speech_recognition.WaitTimeoutError`` exception. If ``timeout`` is
``None``, it will wait indefinitely.
"""
assert isinstance(source, AudioSource), "Source must be an audio source"
assert isinstance(source, AudioSource), \
"Source must be an audio source"
assert self.pause_threshold >= self.non_speaking_duration >= 0
seconds_per_buffer = (source.CHUNK + 0.0) / source.SAMPLE_RATE
pause_buffer_count = int(math.ceil(self.pause_threshold / seconds_per_buffer)) # number of buffers of non-speaking audio before the phrase is complete
phrase_buffer_count = int(math.ceil(self.phrase_threshold / seconds_per_buffer)) # minimum number of buffers of speaking audio before we consider the speaking audio a phrase
non_speaking_buffer_count = int(math.ceil(self.non_speaking_duration / seconds_per_buffer)) # maximum number of buffers of non-speaking audio to retain before and after
# number of buffers of non-speaking audio before the phrase is
# complete
pause_buffer_count = int(
math.ceil(self.pause_threshold / seconds_per_buffer))
# minimum number of buffers of speaking audio before we consider the
# speaking audio a phrase
phrase_buffer_count = int(math.ceil(self.phrase_threshold /
seconds_per_buffer))
# maximum number of buffers of non-speaking audio to retain before and
# after
non_speaking_buffer_count = int(math.ceil(self.non_speaking_duration /
seconds_per_buffer))
# read audio input for phrases until there is a phrase that is long enough
elapsed_time = 0 # number of seconds of audio read
# read audio input for phrases until there is a phrase that is long
# enough
elapsed_time = 0 # number of seconds of audio read
while True:
frames = collections.deque()
# store audio input until the phrase starts
while True:
elapsed_time += seconds_per_buffer
if timeout and elapsed_time > timeout: # handle timeout if specified
# handle timeout if specified
if timeout and elapsed_time > timeout:
raise WaitTimeoutError("listening timed out")
buffer = source.stream.read(source.CHUNK)
if len(buffer) == 0: break # reached end of the stream
if len(buffer) == 0:
break # reached end of the stream
frames.append(buffer)
if len(frames) > non_speaking_buffer_count: # ensure we only keep the needed amount of non-speaking buffers
# ensure we only keep the needed amount of non-speaking buffers
if len(frames) > non_speaking_buffer_count:
frames.popleft()
# detect whether speaking has started on audio input
energy = audioop.rms(buffer, source.SAMPLE_WIDTH) # energy of the audio signal
if energy > self.energy_threshold: break
# energy of the audio signal
energy = audioop.rms(buffer, source.SAMPLE_WIDTH)
if energy > self.energy_threshold:
break
# dynamically adjust the energy threshold using assymmetric weighted average
# do not adjust dynamic energy level for this sample if it is muted audio (energy == 0)
# dynamically adjust the energy threshold using assymmetric
# weighted average
# do not adjust dynamic energy level for this sample if it is
# muted audio (energy == 0)
self.adjust_energy_threshold(energy, seconds_per_buffer)
# read audio input until the phrase ends
pause_count, phrase_count = 0, 0
@ -140,38 +175,51 @@ class Recognizer(speech_recognition.Recognizer):
elapsed_time += seconds_per_buffer
buffer = source.stream.read(source.CHUNK)
if len(buffer) == 0: break # reached end of the stream
if len(buffer) == 0:
break # reached end of the stream
frames.append(buffer)
phrase_count += 1
# check if speaking has stopped for longer than the pause threshold on the audio input
energy = audioop.rms(buffer, source.SAMPLE_WIDTH) # energy of the audio signal
# check if speaking has stopped for longer than the pause
# threshold on the audio input
# energy of the audio signal
energy = audioop.rms(buffer, source.SAMPLE_WIDTH)
if energy > self.energy_threshold:
pause_count = 0
else:
pause_count += 1
if pause_count > pause_buffer_count: # end of the phrase
if pause_count > pause_buffer_count: # end of the phrase
break
if len(frames) * seconds_per_buffer >= self.max_audio_length_sec:
# if we hit the end of the audio length, readjust energy_threshold
if (len(frames) * seconds_per_buffer >=
self.max_audio_length_sec):
# if we hit the end of the audio length, readjust
# energy_threshold
for frame in frames:
energy = audioop.rms(frame, source.SAMPLE_WIDTH)
self.adjust_energy_threshold(energy, seconds_per_buffer)
self.adjust_energy_threshold(
energy, seconds_per_buffer)
break
# check how long the detected phrase is, and retry listening if the phrase is too short
# check how long the detected phrase is, and retry listening if
# the phrase is too short
phrase_count -= pause_count
if phrase_count >= phrase_buffer_count: break # phrase is long enough, stop listening
if phrase_count >= phrase_buffer_count:
break # phrase is long enough, stop listening
# obtain frame data
for i in range(pause_count - non_speaking_buffer_count): frames.pop() # remove extra non-speaking frames at the end
for i in range(pause_count - non_speaking_buffer_count):
frames.pop() # remove extra non-speaking frames at the end
frame_data = b"".join(list(frames))
return AudioData(frame_data, source.SAMPLE_RATE, source.SAMPLE_WIDTH)
def adjust_energy_threshold(self, energy, seconds_per_buffer):
if self.dynamic_energy_threshold and energy > 0:
damping = self.dynamic_energy_adjustment_damping ** seconds_per_buffer # account for different chunk sizes and rates
# account for different chunk sizes and rates
damping = (
self.dynamic_energy_adjustment_damping ** seconds_per_buffer)
target_energy = energy * self.dynamic_energy_ratio
self.energy_threshold = self.energy_threshold * damping + target_energy * (1 - damping)
self.energy_threshold = (
self.energy_threshold * damping +
target_energy * (1 - damping))

View File

@ -21,17 +21,21 @@ class GoogleRecognizerWrapper(object):
def __init__(self, recognizer):
self.recognizer = recognizer
def transcribe(self, audio, language="en-US", show_all=False, metrics=None):
def transcribe(
self, audio, language="en-US", show_all=False, metrics=None):
key = config.get('goog_api_key')
return self.recognizer.recognize_google(audio, key=key, language=language, show_all=show_all)
return self.recognizer.recognize_google(
audio, key=key, language=language, show_all=show_all)
class WitRecognizerWrapper(object):
def __init__(self, recognizer):
self.recognizer = recognizer
def transcribe(self, audio, language="en-US", show_all=False, metrics=None):
assert language == "en-US", "language must be default, language parameter not supported."
def transcribe(
self, audio, language="en-US", show_all=False, metrics=None):
assert language == "en-US", \
"language must be default, language parameter not supported."
key = config.get('wit_api_key')
return self.recognizer.recognize_wit(audio, key, show_all=show_all)
@ -40,23 +44,27 @@ class IBMRecognizerWrapper(object):
def __init__(self, recognizer):
self.recognizer = recognizer
def transcribe(self, audio, language="en-US", show_all=False, metrics=None):
def transcribe(
self, audio, language="en-US", show_all=False, metrics=None):
username = config.get('ibm_username')
password = config.get('ibm_password')
return self.recognizer.recognize_ibm(audio, username, password, language=language, show_all=show_all)
return self.recognizer.recognize_ibm(
audio, username, password, language=language, show_all=show_all)
class CerberusGoogleProxy(object):
def __init__(self, _):
self.version = get_version()
def transcribe(self, audio, language="en-US", show_all=False, metrics=None):
def transcribe(
self, audio, language="en-US", show_all=False, metrics=None):
timer = Stopwatch()
timer.start()
identity = IdentityManager().get()
headers = {}
if identity.token:
headers['Authorization'] = 'Bearer %s:%s' % (identity.device_id, identity.token)
headers['Authorization'] = 'Bearer %s:%s' % (
identity.device_id, identity.token)
response = requests.post(config.get("proxy_host") +
"/stt/google_v2?language=%s&version=%s"
@ -78,21 +86,27 @@ class CerberusGoogleProxy(object):
raise UnknownValueError()
log.info("STT JSON: " + json.dumps(actual_result))
if show_all: return actual_result
if show_all:
return actual_result
# return the best guess
if "alternative" not in actual_result: raise UnknownValueError()
if "alternative" not in actual_result:
raise UnknownValueError()
alternatives = actual_result["alternative"]
if len([alt for alt in alternatives if alt.get('confidence')]) > 0:
# if there is at least one element with confidence, force it to the front
alternatives.sort(key=lambda e: e.get('confidence', 0.0), reverse=True)
# if there is at least one element with confidence, force it to
# the front
alternatives.sort(
key=lambda e: e.get('confidence', 0.0), reverse=True)
for entry in alternatives:
if "transcript" in entry:
return entry["transcript"]
if len(alternatives) > 0:
log.error("Found %d entries, but none with a transcript." % len(alternatives))
log.error(
"Found %d entries, but none with a transcript." % len(
alternatives))
# no transcriptions available
raise UnknownValueError()

View File

@ -1,28 +1,30 @@
from mycroft.metrics import Stopwatch
__author__ = 'seanfitz'
import os
import sys
from pocketsphinx import *
from cmath import exp, pi
__author__ = 'seanfitz'
BASEDIR = os.path.dirname(os.path.abspath(__file__))
from cmath import exp, pi
def fft(x):
"""
fft function to clean data, but most be converted to array of IEEE floats first
fft function to clean data, but most be converted to array of IEEE floats
first
:param x:
:return:
"""
N = len(x)
if N <= 1: return x
if N <= 1:
return x
even = fft(x[0::2])
odd = fft(x[1::2])
T= [exp(-2j*pi*k/N)*odd[k] for k in xrange(N/2)]
odd = fft(x[1::2])
T = [exp(-2j*pi*k/N)*odd[k] for k in xrange(N/2)]
return [even[k] + T[k] for k in xrange(N/2)] + \
[even[k] - T[k] for k in xrange(N/2)]
@ -45,8 +47,10 @@ class Recognizer(object):
def create_recognizer(samprate=16000, lang="en-us", keyphrase="hey mycroft"):
sphinx_config = Decoder.default_config()
sphinx_config.set_string('-hmm', os.path.join(BASEDIR, 'model', lang, 'hmm'))
sphinx_config.set_string('-dict', os.path.join(BASEDIR, 'model', lang, 'mycroft-en-us.dict'))
sphinx_config.set_string(
'-hmm', os.path.join(BASEDIR, 'model', lang, 'hmm'))
sphinx_config.set_string(
'-dict', os.path.join(BASEDIR, 'model', lang, 'mycroft-en-us.dict'))
sphinx_config.set_string('-keyphrase', keyphrase)
sphinx_config.set_float('-kws_threshold', float('1e-45'))
sphinx_config.set_float('-samprate', samprate)
@ -55,4 +59,4 @@ def create_recognizer(samprate=16000, lang="en-us", keyphrase="hey mycroft"):
decoder = Decoder(sphinx_config)
return Recognizer(decoder)
return Recognizer(decoder)

View File

@ -31,7 +31,7 @@ def connect():
def main():
global client
client = WebsocketClient()
if not '--quiet' in sys.argv:
if '--quiet' not in sys.argv:
client.on('speak', handle_speak)
event_thread = Thread(target=connect)
event_thread.setDaemon(True)
@ -40,7 +40,9 @@ def main():
while True:
print("Input:")
line = sys.stdin.readline()
client.emit(Message("recognizer_loop:utterance", metadata={'utterances': [line.strip()]}))
client.emit(
Message("recognizer_loop:utterance",
metadata={'utterances': [line.strip()]}))
except KeyboardInterrupt, e:
event_thread.exit()
sys.exit()

View File

@ -10,9 +10,11 @@ from mycroft.util.log import getLogger
__author__ = 'seanfitz'
logger = getLogger(__name__)
DEFAULTS_FILE = os.path.join(os.path.dirname(__file__), 'defaults', 'defaults.ini')
DEFAULTS_FILE = os.path.join(
os.path.dirname(__file__), 'defaults', 'defaults.ini')
ETC_CONFIG_FILE = '/etc/mycroft/mycroft.ini'
USER_CONFIG_FILE = os.path.join(os.path.expanduser('~'), '.mycroft/mycroft.ini')
USER_CONFIG_FILE = os.path.join(
os.path.expanduser('~'), '.mycroft/mycroft.ini')
DEFAULT_LOCATIONS = [DEFAULTS_FILE, ETC_CONFIG_FILE, USER_CONFIG_FILE]
@ -35,7 +37,8 @@ class ConfigurationLoader(object):
def load(self):
"""
Loads configuration files from disk, in the locations defined by DEFAULT_LOCATIONS
Loads configuration files from disk, in the locations defined by
DEFAULT_LOCATIONS
"""
config = {}
for config_file in self.config_locations:
@ -45,10 +48,12 @@ class ConfigurationLoader(object):
cobj = ConfigObj(config_file)
config = ConfigurationLoader._overwrite_merge(config, cobj)
except Exception, e:
logger.error("Error loading config file [%s]" % config_file)
logger.error(
"Error loading config file [%s]" % config_file)
logger.error(repr(e))
else:
logger.debug("Could not find config file at [%s]" % config_file)
logger.debug(
"Could not find config file at [%s]" % config_file)
return config
@ -70,23 +75,32 @@ class RemoteConfiguration(object):
def update(self):
config = self.config_manager.get_config()
remote_config_url = config.get("remote_configuration").get("url")
enabled = str2bool(config.get("remote_configuration").get("enabled", "False"))
enabled = str2bool(
config.get("remote_configuration").get("enabled", "False"))
if enabled and self.identity.token:
auth_header = "Bearer %s:%s" % (self.identity.device_id, self.identity.token)
auth_header = "Bearer %s:%s" % (
self.identity.device_id, self.identity.token)
try:
response = requests.get(remote_config_url, headers={"Authorization": auth_header})
response = requests.get(
remote_config_url, headers={"Authorization": auth_header})
user = response.json()
for attribute in user["attributes"]:
attribute_name = attribute.get("attribute_name")
core_config_name = self.remote_config_mapping.get(attribute_name)
core_config_name = self.remote_config_mapping.get(
attribute_name)
if core_config_name:
config["core"][core_config_name] = str(attribute.get("attribute_value"))
logger.info("Accepting remote configuration: core[%s] == %s" % (core_config_name, attribute["attribute_value"]))
config["core"][core_config_name] = str(
attribute.get("attribute_value"))
logger.info(
"Accepting remote configuration: core[%s] == %s" %
(core_config_name, attribute["attribute_value"]))
except Exception as e:
logger.error("Failed to fetch remote configuration: %s" % repr(e))
logger.error(
"Failed to fetch remote configuration: %s" % repr(e))
else:
logger.debug("Device not paired, cannot retrieve remote configuration.")
logger.debug(
"Device not paired, cannot retrieve remote configuration.")
class ConfigurationManager(object):
@ -101,7 +115,8 @@ class ConfigurationManager(object):
Load default config files as well as any additionally specified files.
Now also loads configuration from Cerberus (if device is paired)
:param config_files: An array of config file paths in addition to DEFAULT_LOCATIONS
:param config_files: An array of config file paths in addition to
DEFAULT_LOCATIONS
:return: None
"""

View File

@ -38,22 +38,25 @@ class MustacheDialogRenderer(object):
def render(self, template_name, context={}, index=None):
"""
Given a template name, pick a template and render it with the provided context.
Given a template name, pick a template and render it with the provided
context.
:param template_name: the name of a template group.
:param context: dictionary representing values to be rendered
:param index: optional, the specific index in the collection of templates
:param index: optional, the specific index in the collection of
templates
:raises NotImplementedError: if no template can be found identified by template_name
:raises NotImplementedError: if no template can be found identified by
template_name
:return:
"""
if template_name not in self.templates:
raise NotImplementedError("Template not found: %s" % template_name)
template_functions = self.templates.get(template_name)
if index == None:
if index is None:
index = random.randrange(len(template_functions))
else:
index %= len(template_functions)
@ -79,10 +82,11 @@ class DialogLoader(object):
logger.warn("No dialog found: " + dialog_dir)
return self.__renderer
for f in sorted(filter(lambda x: os.path.isfile(os.path.join(dialog_dir, x)), os.listdir(dialog_dir))):
for f in sorted(
filter(lambda x: os.path.isfile(
os.path.join(dialog_dir, x)), os.listdir(dialog_dir))):
dialog_entry_name = os.path.splitext(f)[0]
self.__renderer.load_template_file(dialog_entry_name, os.path.join(dialog_dir, f))
self.__renderer.load_template_file(
dialog_entry_name, os.path.join(dialog_dir, f))
return self.__renderer

View File

@ -6,7 +6,8 @@ __author__ = 'jdorleans'
class FileSystemAccess(object):
"""
A class for providing access to the mycroft FS sandbox. Intended to be attached to skills
A class for providing access to the mycroft FS sandbox. Intended to be
attached to skills
at initialization time to provide a skill-specific namespace.
"""
def __init__(self, path):
@ -24,9 +25,11 @@ class FileSystemAccess(object):
def open(self, filename, mode):
"""
Get a handle to a file (with the provided mode) within the skill-specific namespace.
Get a handle to a file (with the provided mode) within the
skill-specific namespace.
:param filename: a str representing a path relative to the namespace. subdirs not currently supported.
:param filename: a str representing a path relative to the namespace.
subdirs not currently supported.
:param mode: a file handle mode

View File

@ -26,7 +26,8 @@ class IdentityManager(object):
def initialize(self):
if self.filesystem.exists('identity.json'):
self.identity = DeviceIdentity.load(self.filesystem.open('identity.json', 'r'))
self.identity = DeviceIdentity.load(self.filesystem.open(
'identity.json', 'r'))
else:
identity = DeviceIdentity(device_id=str(uuid4()))
self.update(identity)

View File

@ -32,11 +32,12 @@ class WebsocketClient(object):
self.pool = ThreadPool(10)
def _create_new_connection(self):
return WebSocketApp(self.scheme + "://" + self.host + ":" + str(self.port) + self.path,
on_open=self.on_open,
on_close=self.on_close,
on_error=self.on_error,
on_message=self.on_message)
return WebSocketApp(
self.scheme + "://" + self.host + ":" + str(self.port) + self.path,
on_open=self.on_open,
on_close=self.on_close,
on_error=self.on_error,
on_message=self.on_message)
def on_open(self, ws):
logger.info("Connected")
@ -52,7 +53,8 @@ class WebsocketClient(object):
except Exception, e:
logger.error(repr(e))
sleep_time = self.exp_backoff_counter
logger.warn("Disconnecting on error, reconnecting in %d seconds." % sleep_time)
logger.warn(
"Disconnecting on error, reconnecting in %d seconds." % sleep_time)
self.exp_backoff_counter = min(self.exp_backoff_counter * 2, 60)
time.sleep(sleep_time)
self.client = self._create_new_connection()
@ -61,10 +63,12 @@ class WebsocketClient(object):
def on_message(self, ws, message):
self.emitter.emit('message', message)
parsed_message = Message.deserialize(message)
self.pool.apply_async(self.emitter.emit, (parsed_message.message_type, parsed_message))
self.pool.apply_async(
self.emitter.emit, (parsed_message.message_type, parsed_message))
def emit(self, message):
if not self.client or not self.client.sock or not self.client.sock.connected:
if (not self.client or not self.client.sock or
not self.client.sock.connected):
return
if hasattr(message, 'serialize'):
self.client.send(message.serialize())
@ -86,6 +90,7 @@ class WebsocketClient(object):
def echo():
client = WebsocketClient()
def echo(message):
logger.info(message)
@ -97,4 +102,4 @@ def echo():
client.run_forever()
if __name__ == "__main__":
echo()
echo()

View File

@ -1,6 +1,7 @@
__author__ = 'seanfitz'
import json
__author__ = 'seanfitz'
class Message(object):
def __init__(self, message_type, metadata={}, context=None):
@ -44,4 +45,4 @@ class Message(object):
if 'target' in new_context:
del new_context['target']
return Message(message_type, metadata, context=new_context)
return Message(message_type, metadata, context=new_context)

View File

@ -11,8 +11,6 @@ settings = {
}
def main():
import tornado.options
tornado.options.parse_command_line()

View File

@ -18,7 +18,8 @@ client_connections = []
class WebsocketEventHandler(tornado.websocket.WebSocketHandler):
def __init__(self, application, request, **kwargs):
tornado.websocket.WebSocketHandler.__init__(self, application, request, **kwargs)
tornado.websocket.WebSocketHandler.__init__(
self, application, request, **kwargs)
self.emitter = EventBusEmitter
def on(self, event_name, handler):
@ -32,7 +33,8 @@ class WebsocketEventHandler(tornado.websocket.WebSocketHandler):
return
try:
self.emitter.emit(deserialized_message.message_type, deserialized_message)
self.emitter.emit(
deserialized_message.message_type, deserialized_message)
except Exception, e:
traceback.print_exc(file=sys.stdout)
pass
@ -48,7 +50,8 @@ class WebsocketEventHandler(tornado.websocket.WebSocketHandler):
client_connections.remove(self)
def emit(self, channel_message):
if hasattr(channel_message, 'serialize') and callable(getattr(channel_message, 'serialize')):
if (hasattr(channel_message, 'serialize') and
callable(getattr(channel_message, 'serialize'))):
self.write_message(channel_message.serialize())
else:
self.write_message(json.dumps(channel_message))

View File

@ -12,6 +12,7 @@ from mycroft.util.setup_base import get_version
config = ConfigurationManager.get_config().get('metrics_client')
metrics_log = getLogger("METRICS")
class Stopwatch(object):
def __init__(self):
self.timestamp = None
@ -77,9 +78,11 @@ class MetricsAggregator(object):
'attributes': self._attributes
}
self.clear()
count = len(payload['counters']) + len(payload['timers']) + len(payload['levels'])
count = (len(payload['counters']) + len(payload['timers']) +
len(payload['levels']))
if count > 0:
metrics_log.debug(json.dumps(payload))
def publish():
publisher.publish(payload)
threading.Thread(target=publish).start()
@ -97,4 +100,7 @@ class MetricsPublisher(object):
session_id = SessionManager.get().session_id
events['session_id'] = session_id
if self.enabled:
requests.post(self.url, headers={'Content-Type': 'application/json'}, data=json.dumps(events), verify=False)
requests.post(
self.url,
headers={'Content-Type': 'application/json'},
data=json.dumps(events), verify=False)

View File

@ -26,10 +26,12 @@ class DevicePairingClient(object):
ssl=str2bool(config.get("ssl")))
self.identity_manager = IdentityManager()
self.identity = self.identity_manager.identity
self.pairing_code = pairing_code if pairing_code else generate_pairing_code()
self.pairing_code = (
pairing_code if pairing_code else generate_pairing_code())
def on_registration(self, message):
# TODO: actually accept the configuration message and store it in identity
# TODO: actually accept the configuration message and store it in
# identity
identity = self.identity_manager.get()
register_payload = message.metadata
if register_payload.get("device_id") == identity.device_id:
@ -57,9 +59,8 @@ class DevicePairingClient(object):
self.ws_client.run_forever()
def main():
DevicePairingClient().run()
if __name__ == "__main__":
main()
main()

View File

@ -53,10 +53,14 @@ class SessionManager(object):
:return: An active session
"""
with SessionManager.__lock:
if not SessionManager.__current_session or SessionManager.__current_session.expired():
SessionManager.__current_session = Session(str(uuid4()),
expiration_seconds=config.get('session_ttl_seconds', 180))
logger.info("New Session Start: " + SessionManager.__current_session.session_id)
if (not SessionManager.__current_session or
SessionManager.__current_session.expired()):
SessionManager.__current_session = Session(
str(uuid4()),
expiration_seconds=config.get('session_ttl_seconds', 180))
logger.info(
"New Session Start: " +
SessionManager.__current_session.session_id)
return SessionManager.__current_session
@staticmethod
@ -67,5 +71,3 @@ class SessionManager(object):
:return: None
"""
SessionManager.get().touch()

View File

@ -23,7 +23,8 @@ class AlarmSkill(ScheduledCRUDSkill):
def initialize(self):
super(AlarmSkill, self).initialize()
intent = IntentBuilder('AlarmSkillStopIntent').require('AlarmSkillStopVerb') \
intent = IntentBuilder(
'AlarmSkillStopIntent').require('AlarmSkillStopVerb') \
.require('AlarmSkillKeyword').build()
self.register_intent(intent, self.__handle_stop)

View File

@ -31,19 +31,24 @@ class AudioRecordSkill(ScheduledSkill):
def initialize(self):
self.load_data_files(dirname(__file__))
intent = IntentBuilder("AudioRecordSkillIntent").require("AudioRecordSkillKeyword").build()
intent = IntentBuilder("AudioRecordSkillIntent").require(
"AudioRecordSkillKeyword").build()
self.register_intent(intent, self.handle_record)
intent = IntentBuilder('AudioRecordSkillStopIntent').require('AudioRecordSkillStopVerb') \
intent = IntentBuilder('AudioRecordSkillStopIntent').require(
'AudioRecordSkillStopVerb') \
.require('AudioRecordSkillKeyword').build()
self.register_intent(intent, self.handle_stop)
intent = IntentBuilder('AudioRecordSkillPlayIntent').require('AudioRecordSkillPlayVerb') \
intent = IntentBuilder('AudioRecordSkillPlayIntent').require(
'AudioRecordSkillPlayVerb') \
.require('AudioRecordSkillKeyword').build()
self.register_intent(intent, self.handle_play)
intent = IntentBuilder('AudioRecordSkillStopPlayIntent').require('AudioRecordSkillStopVerb') \
.require('AudioRecordSkillPlayVerb').require('AudioRecordSkillKeyword').build()
intent = IntentBuilder('AudioRecordSkillStopPlayIntent').require(
'AudioRecordSkillStopVerb') \
.require('AudioRecordSkillPlayVerb').require(
'AudioRecordSkillKeyword').build()
self.register_intent(intent, self.handle_stop_play)
def handle_record(self, message):
@ -55,7 +60,8 @@ class AudioRecordSkill(ScheduledSkill):
self.notify_time = now
self.feedback_start()
time.sleep(3)
self.record_process = record(self.file_path, self.duration, self.rate, self.channels)
self.record_process = record(
self.file_path, self.duration, self.rate, self.channels)
self.schedule()
else:
self.speak_dialog("audio.record.disk.full")
@ -76,7 +82,8 @@ class AudioRecordSkill(ScheduledSkill):
def feedback_start(self):
if self.duration > 0:
self.speak_dialog('audio.record.start.duration', {'duration': self.duration})
self.speak_dialog(
'audio.record.start.duration', {'duration': self.duration})
else:
self.speak_dialog('audio.record.start')

View File

@ -28,4 +28,4 @@ class CerberusConfigSkill(MycroftSkill):
def create_skill():
return CerberusConfigSkill()
return CerberusConfigSkill()

View File

@ -17,11 +17,16 @@ class SkillContainer(object):
def __init__(self, args):
parser = argparse.ArgumentParser()
parser.add_argument("--dependency-dir", default="./lib")
parser.add_argument("--messagebus-host", default=messagebus_config.get("host"))
parser.add_argument("--messagebus-port", type=int, default=messagebus_config.get("port"))
parser.add_argument(
"--messagebus-host", default=messagebus_config.get("host"))
parser.add_argument(
"--messagebus-port", type=int,
default=messagebus_config.get("port"))
parser.add_argument("--use-ssl", action='store_true', default=False)
parser.add_argument("--enable-intent-skill", action='store_true', default=False)
parser.add_argument("skill_directory", default=os.path.dirname(__file__))
parser.add_argument(
"--enable-intent-skill", action='store_true', default=False)
parser.add_argument(
"skill_directory", default=os.path.dirname(__file__))
parsed_args = parser.parse_args(args)
if os.path.exists(parsed_args.dependency_dir):

View File

@ -31,15 +31,20 @@ def load_vocab_from_file(path, vocab_type, emitter):
parts = line.strip().split("|")
entity = parts[0]
emitter.emit(Message("register_vocab", metadata={'start': entity, 'end': vocab_type}))
emitter.emit(
Message("register_vocab",
metadata={'start': entity, 'end': vocab_type}))
for alias in parts[1:]:
emitter.emit(
Message("register_vocab", metadata={'start': alias, 'end': vocab_type, 'alias_of': entity}))
Message("register_vocab",
metadata={'start': alias, 'end': vocab_type,
'alias_of': entity}))
def load_vocabulary(basedir, emitter):
for vocab_type in os.listdir(basedir):
load_vocab_from_file(join(basedir, vocab_type), splitext(vocab_type)[0], emitter)
load_vocab_from_file(
join(basedir, vocab_type), splitext(vocab_type)[0], emitter)
def create_intent_envelope(intent):
@ -56,16 +61,22 @@ def open_intent_envelope(message):
def load_skill(skill_descriptor, emitter):
try:
skill_module = imp.load_module(skill_descriptor["name"] + MainModule, *skill_descriptor["info"])
if hasattr(skill_module, 'create_skill') and callable(skill_module.create_skill): # v2 skills framework
skill_module = imp.load_module(
skill_descriptor["name"] + MainModule, *skill_descriptor["info"])
if (hasattr(skill_module, 'create_skill') and
callable(skill_module.create_skill)):
# v2 skills framework
skill = skill_module.create_skill()
skill.bind(emitter)
skill.initialize()
return skill
else:
logger.warn("Module %s does not appear to be skill" % (skill_descriptor["name"]))
logger.warn(
"Module %s does not appear to be skill" % (
skill_descriptor["name"]))
except:
logger.error("Failed to load skill: " + skill_descriptor["name"], exc_info=True)
logger.error(
"Failed to load skill: " + skill_descriptor["name"], exc_info=True)
return None
@ -74,7 +85,8 @@ def get_skills(skills_folder):
possible_skills = os.listdir(skills_folder)
for i in possible_skills:
location = join(skills_folder, i)
if not isdir(location) or not MainModule + ".py" in os.listdir(location):
if (not isdir(location) or
not MainModule + ".py" in os.listdir(location)):
continue
skills.append(create_skill_descriptor(location))
@ -94,13 +106,15 @@ def load_skills(emitter, skills_root=SKILLS_BASEDIR):
load_skill(skill, emitter)
for skill in skills:
if skill['name'] not in PRIMARY_SKILLS and skill['name'] not in BLACKLISTED_SKILLS:
if (skill['name'] not in PRIMARY_SKILLS and
skill['name'] not in BLACKLISTED_SKILLS):
load_skill(skill, emitter)
class MycroftSkill(object):
"""
Abstract base class which provides common behaviour and parameters to all Skills implementation.
Abstract base class which provides common behaviour and parameters to all
Skills implementation.
"""
def __init__(self, name, emitter=None):
@ -134,7 +148,8 @@ class MycroftSkill(object):
def detach(self):
for name in self.registered_intents:
self.emitter.emit(Message("detach_intent", metadata={"intent_name": name}))
self.emitter.emit(
Message("detach_intent", metadata={"intent_name": name}))
def initialize(self):
"""
@ -155,17 +170,24 @@ class MycroftSkill(object):
handler(message)
except:
# TODO: Localize
self.speak("An error occurred while processing a request in " + self.name)
logger.error("An error occurred while processing a request in " + self.name, exc_info=True)
self.speak(
"An error occurred while processing a request in " +
self.name)
logger.error(
"An error occurred while processing a request in " +
self.name, exc_info=True)
self.emitter.on(intent_parser.name, receive_handler)
def register_vocabulary(self, entity, entity_type):
self.emitter.emit(Message('register_vocab', metadata={'start': entity, 'end': entity_type}))
self.emitter.emit(
Message('register_vocab',
metadata={'start': entity, 'end': entity_type}))
def register_regex(self, regex_str):
re.compile(regex_str) # validate regex
self.emitter.emit(Message('register_vocab', metadata={'regex': regex_str}))
self.emitter.emit(
Message('register_vocab', metadata={'regex': regex_str}))
def speak(self, utterance):
self.emitter.emit(Message("speak", metadata={'utterance': utterance}))
@ -174,7 +196,8 @@ class MycroftSkill(object):
self.speak(self.dialog_renderer.render(key, data))
def init_dialog(self, root_directory):
self.dialog_renderer = DialogLoader().load(join(root_directory, 'dialog', self.lang))
self.dialog_renderer = DialogLoader().load(
join(root_directory, 'dialog', self.lang))
def load_data_files(self, root_directory):
self.init_dialog(root_directory)

View File

@ -23,7 +23,8 @@ class TimeSkill(MycroftSkill):
self.register_regex("in (?P<Location>.*)")
self.register_regex("at (?P<Location>.*)")
intent = IntentBuilder("TimeIntent").require("TimeKeyword").optionally("Location").build()
intent = IntentBuilder("TimeIntent").require(
"TimeKeyword").optionally("Location").build()
self.register_intent(intent, self.handle_intent)
@ -43,7 +44,8 @@ class TimeSkill(MycroftSkill):
except:
return None
# This method only handles localtime, for other timezones the task falls to Wolfram.
# This method only handles localtime, for other timezones the task falls
# to Wolfram.
def handle_intent(self, message):
location = message.metadata.get("Location", None)

View File

@ -57,14 +57,18 @@ class DesktopLauncherSkill(MycroftSkill):
self.register_regex("for (?P<SearchTerms>.*) on")
self.register_regex("(?P<SearchTerms>.*) on")
launch_intent = IntentBuilder("LaunchDesktopApplication").require("LaunchKeyword").require(
launch_intent = IntentBuilder(
"LaunchDesktopApplication").require("LaunchKeyword").require(
"Application").build()
self.register_intent(launch_intent, self.handle_launch_desktop_app)
launch_website_intent = IntentBuilder("LaunchWebsiteIntent").require("LaunchKeyword").require("Website").build()
launch_website_intent = IntentBuilder(
"LaunchWebsiteIntent").require("LaunchKeyword").require(
"Website").build()
self.register_intent(launch_website_intent, self.handle_launch_website)
search_website = IntentBuilder("SearchWebsiteIntent").require("SearchKeyword").require("Website").require(
search_website = IntentBuilder("SearchWebsiteIntent").require(
"SearchKeyword").require("Website").require(
"SearchTerms").build()
self.register_intent(search_website, self.handle_search_website)

View File

@ -12,12 +12,18 @@ LOGGER = getLogger(__name__)
class DialCallSkill(MycroftSkill):
DBUS_CMD = ["dbus-send", "--print-reply", "--dest=com.canonical.TelephonyServiceHandler",
"/com/canonical/TelephonyServiceHandler", "com.canonical.TelephonyServiceHandler.StartCall"]
DBUS_CMD = [
"dbus-send", "--print-reply",
"--dest=com.canonical.TelephonyServiceHandler",
"/com/canonical/TelephonyServiceHandler",
"com.canonical.TelephonyServiceHandler.StartCall"
]
def __init__(self):
super(DialCallSkill, self).__init__(name="DialCallSkill")
self.contacts = {'jonathan': '12345678', 'ryan': '23456789', 'sean': '34567890'} # TODO - Use API
self.contacts = {
'jonathan': '12345678', 'ryan': '23456789',
'sean': '34567890'} # TODO - Use API
def initialize(self):
self.load_vocab_files(join(dirname(__file__), 'vocab', 'en-us'))
@ -25,7 +31,8 @@ class DialCallSkill(MycroftSkill):
prefixes = ['call', 'phone'] # TODO - i10n
self.__register_prefixed_regex(prefixes, "(?P<Contact>.*)")
intent = IntentBuilder("DialCallIntent").require("DialCallKeyword").require("Contact").build()
intent = IntentBuilder("DialCallIntent").require(
"DialCallKeyword").require("Contact").build()
self.register_intent(intent, self.handle_intent)
def __register_prefixed_regex(self, prefixes, suffix_regex):
@ -51,7 +58,9 @@ class DialCallSkill(MycroftSkill):
subprocess.call(cmd)
def __notify(self, contact, number):
self.emitter.emit(Message("dial_call", metadata={'contact': contact, 'number': number}))
self.emitter.emit(
Message("dial_call",
metadata={'contact': contact, 'number': number}))
def stop(self):
pass

View File

@ -22,18 +22,25 @@ class IntentSkill(MycroftSkill):
best_intent = None
for utterance in utterances:
try:
best_intent = next(self.engine.determine_intent(utterance, num_results=100))
best_intent['utterance'] = utterance # TODO - Should Adapt handle this?
best_intent = next(self.engine.determine_intent(
utterance, num_results=100))
# TODO - Should Adapt handle this?
best_intent['utterance'] = utterance
except StopIteration, e:
continue
if best_intent and best_intent.get('confidence', 0.0) > 0.0:
reply = message.reply(best_intent.get('intent_type'), metadata=best_intent)
reply = message.reply(
best_intent.get('intent_type'), metadata=best_intent)
self.emitter.emit(reply)
elif len(utterances) == 1:
self.emitter.emit(Message("intent_failure", metadata={"utterance": utterances[0]}))
self.emitter.emit(
Message("intent_failure",
metadata={"utterance": utterances[0]}))
else:
self.emitter.emit(Message("multi_utterance_intent_failure", metadata={"utterances": utterances}))
self.emitter.emit(
Message("multi_utterance_intent_failure",
metadata={"utterances": utterances}))
def handle_register_vocab(self, message):
start_concept = message.metadata.get('start')
@ -43,7 +50,8 @@ class IntentSkill(MycroftSkill):
if regex_str:
self.engine.register_regex_entity(regex_str)
else:
self.engine.register_entity(start_concept, end_concept, alias_of=alias_of)
self.engine.register_entity(
start_concept, end_concept, alias_of=alias_of)
def handle_register_intent(self, message):
intent = open_intent_envelope(message)
@ -51,7 +59,8 @@ class IntentSkill(MycroftSkill):
def handle_detach_intent(self, message):
intent_name = message.metadata.get('intent_name')
new_parsers = [p for p in self.engine.intent_parsers if p.name != intent_name]
new_parsers = [
p for p in self.engine.intent_parsers if p.name != intent_name]
self.engine.intent_parsers = new_parsers
def stop(self):

View File

@ -21,9 +21,14 @@ class IPSkill(MycroftSkill):
def handle_intent(self, message):
self.speak("Here are my available I.P. addresses.")
for ifaceName in interfaces():
addresses = [i['addr'] for i in ifaddresses(ifaceName).setdefault(AF_INET, [{'addr': 'No IP addr'}])]
addresses = [
i['addr'] for i in
ifaddresses(ifaceName).setdefault(
AF_INET, [{'addr': 'No IP addr'}])]
if ifaceName != "lo":
self.speak('%s: %s' % ("interface: " + ifaceName + ", I.P. Address ", ', '.join(addresses)))
self.speak('%s: %s' % (
"interface: " + ifaceName +
", I.P. Address ", ', '.join(addresses)))
self.speak("Those are all my I.P. addresses.")
def stop(self):

View File

@ -13,7 +13,8 @@ class NapTimeSkill(MycroftSkill):
super(NapTimeSkill, self).__init__(name="NapTimeSkill")
def initialize(self):
intent_parser = IntentBuilder("NapTimeIntent").require("SleepCommand").build()
intent_parser = IntentBuilder("NapTimeIntent").require(
"SleepCommand").build()
self.register_intent(intent_parser, self.handle_intent)
self.load_vocab_files(join(dirname(__file__), 'vocab', 'en-us'))

View File

@ -22,7 +22,8 @@ class NPRNewsSkill(MycroftSkill):
def initialize(self):
self.load_data_files(dirname(__file__))
intent = IntentBuilder("NPRNewsIntent").require("NPRNewsKeyword").build()
intent = IntentBuilder("NPRNewsIntent").require(
"NPRNewsKeyword").build()
self.register_intent(intent, self.handle_intent)
self.weather.bind(self.emitter)

View File

@ -12,7 +12,8 @@ class PairingSkill(MycroftSkill):
super(PairingSkill, self).__init__(name="PairingSkill")
def initialize(self):
intent = IntentBuilder("PairingIntent").require("DevicePairingPhrase").build()
intent = IntentBuilder("PairingIntent").require(
"DevicePairingPhrase").build()
self.load_data_files(dirname(__file__))
self.register_intent(intent, handler=self.handle_pairing_request)
@ -21,8 +22,9 @@ class PairingSkill(MycroftSkill):
pairing_code = pairing_client.pairing_code
threading.Thread(target=pairing_client.run).start()
self.enclosure.mouth_text("Pairing code is: " + pairing_code)
self.speak_dialog("pairing.instructions", data={"pairing_code": ', ,'.join(pairing_code)})
self.speak_dialog(
"pairing.instructions",
data={"pairing_code": ', ,'.join(pairing_code)})
def stop(self):
pass

View File

@ -13,10 +13,12 @@ __author__ = 'jdorleans'
# TODO - Localization, Sandbox
class ReminderSkill(ScheduledCRUDSkill):
PRONOUNS = {'i': 'you', 'me': 'you', 'my': 'your', 'myself': 'yourself', 'am': 'are', "'m": "are", "i'm": "you're"}
PRONOUNS = {'i': 'you', 'me': 'you', 'my': 'your', 'myself': 'yourself',
'am': 'are', "'m": "are", "i'm": "you're"}
def __init__(self):
super(ReminderSkill, self).__init__("ReminderSkill", None, dirname(__file__))
super(ReminderSkill, self).__init__(
"ReminderSkill", None, dirname(__file__))
self.reminder_on = False
self.max_delay = int(self.config.get('max_delay'))
self.repeat_time = int(self.config.get('repeat_time'))
@ -24,7 +26,8 @@ class ReminderSkill(ScheduledCRUDSkill):
def initialize(self):
super(ReminderSkill, self).initialize()
intent = IntentBuilder('ReminderSkillStopIntent').require('ReminderSkillStopVerb') \
intent = IntentBuilder(
'ReminderSkillStopIntent').require('ReminderSkillStopVerb') \
.require('ReminderSkillKeyword').build()
self.register_intent(intent, self.__handle_stop)
@ -57,7 +60,9 @@ class ReminderSkill(ScheduledCRUDSkill):
delay = self.__calculate_delay(self.max_delay)
while self.reminder_on and datetime.now() < delay:
self.speak_dialog('reminder.notify', data=self.build_feedback_payload(timestamp))
self.speak_dialog(
'reminder.notify',
data=self.build_feedback_payload(timestamp))
time.sleep(1)
self.speak_dialog('reminder.stop')
time.sleep(self.repeat_time)
@ -78,7 +83,8 @@ class ReminderSkill(ScheduledCRUDSkill):
def add(self, date, message):
utterance = message.metadata.get('utterance').lower()
utterance = utterance.replace(message.metadata.get('ReminderSkillCreateVerb'), '')
utterance = utterance.replace(
message.metadata.get('ReminderSkillCreateVerb'), '')
utterance = self.__fix_pronouns(utterance)
self.repeat_data[date] = self.time_rules.get_week_days(utterance)
self.data[date] = self.__remove_time(utterance).strip()

View File

@ -14,9 +14,11 @@ __author__ = 'jdorleans'
class ScheduledSkill(MycroftSkill):
"""
Abstract class which provides a repeatable notification behaviour at a specified time.
Abstract class which provides a repeatable notification behaviour at a
specified time.
Skills implementation inherits this class when it needs to schedule a task or a notification.
Skills implementation inherits this class when it needs to schedule a task
or a notification.
"""
DELTA_TIME = int((datetime.now() - datetime.utcnow()).total_seconds())
@ -53,7 +55,8 @@ class ScheduledSkill(MycroftSkill):
return mktime(self.calendar.parse(sentence)[0]) - self.DELTA_TIME
def get_formatted_time(self, timestamp):
return datetime.fromtimestamp(timestamp).strftime(self.config_core.get('time.format'))
return datetime.fromtimestamp(timestamp).strftime(
self.config_core.get('time.format'))
@abc.abstractmethod
def get_times(self):
@ -66,11 +69,14 @@ class ScheduledSkill(MycroftSkill):
class ScheduledCRUDSkill(ScheduledSkill):
"""
Abstract CRUD class which provides a repeatable notification behaviour at a specified time.
Abstract CRUD class which provides a repeatable notification behaviour at
a specified time.
It registers CRUD intents and exposes its functions to manipulate a provided ``data``
It registers CRUD intents and exposes its functions to manipulate a
provided ``data``
Skills implementation inherits this class when it needs to schedule a task or a notification with a provided data
Skills implementation inherits this class when it needs to schedule a task
or a notification with a provided data
that can be manipulated by CRUD commands.
E.g. CRUD operations for a Reminder Skill
@ -96,9 +102,12 @@ class ScheduledCRUDSkill(ScheduledSkill):
self.load_repeat_data()
self.load_data_files(self.basedir)
self.register_regex("(?P<" + self.name + "Amount>\d+)")
self.register_intent(self.build_intent_create().build(), self.handle_create)
self.register_intent(self.build_intent_list().build(), self.handle_list)
self.register_intent(self.build_intent_delete().build(), self.handle_delete)
self.register_intent(
self.build_intent_create().build(), self.handle_create)
self.register_intent(
self.build_intent_list().build(), self.handle_list)
self.register_intent(
self.build_intent_delete().build(), self.handle_delete)
self.schedule()
@abc.abstractmethod
@ -110,14 +119,17 @@ class ScheduledCRUDSkill(ScheduledSkill):
pass
def build_intent_create(self):
return IntentBuilder(self.name + 'CreateIntent').require(self.name + 'CreateVerb')
return IntentBuilder(
self.name + 'CreateIntent').require(self.name + 'CreateVerb')
def build_intent_list(self):
return IntentBuilder(self.name + 'ListIntent').require(self.name + 'ListVerb') \
return IntentBuilder(
self.name + 'ListIntent').require(self.name + 'ListVerb') \
.optionally(self.name + 'Amount').require(self.name + 'Keyword')
def build_intent_delete(self):
    # Intent "<Name>DeleteIntent": deletes entries; the amount is
    # optional, the skill keyword is mandatory.
    return IntentBuilder(self.name + 'DeleteIntent') \
        .require(self.name + 'DeleteVerb') \
        .optionally(self.name + 'Amount') \
        .require(self.name + 'Keyword')
def get_times(self):
@ -136,7 +148,8 @@ class ScheduledCRUDSkill(ScheduledSkill):
self.speak_dialog('schedule.datetime.error')
def feedback_create(self, utc_time):
    # Confirm to the user that the entry at ``utc_time`` was scheduled.
    self.speak_dialog(
        'schedule.create', data=self.build_feedback_payload(utc_time))
def add_sync(self, utc_time, message):
with self.LOCK:
@ -193,7 +206,8 @@ class ScheduledCRUDSkill(ScheduledSkill):
self.speak_dialog('schedule.list.empty')
def feedback_list(self, utc_time):
    # Speak one listed entry (identified by ``utc_time``) back to the user.
    self.speak_dialog(
        'schedule.list', data=self.build_feedback_payload(utc_time))
def build_feedback_payload(self, utc_time):
timestamp = self.convert_local(float(utc_time))
@ -222,7 +236,8 @@ class ScheduledCRUDSkill(ScheduledSkill):
if amount > 1:
self.speak_dialog('schedule.delete.many', data={'amount': amount})
else:
self.speak_dialog('schedule.delete.single', data={'amount': amount})
self.speak_dialog(
'schedule.delete.single', data={'amount': amount})
# TODO - Localization
def get_amount(self, message, default=None):

View File

@ -12,20 +12,25 @@ LOGGER = getLogger(__name__)
class SendSMSSkill(MycroftSkill):
DBUS_CMD = ["dbus-send", "--print-reply", "--dest=com.canonical.TelephonyServiceHandler",
"/com/canonical/TelephonyServiceHandler", "com.canonical.TelephonyServiceHandler.SendMessage"]
DBUS_CMD = ["dbus-send", "--print-reply",
"--dest=com.canonical.TelephonyServiceHandler",
"/com/canonical/TelephonyServiceHandler",
"com.canonical.TelephonyServiceHandler.SendMessage"]
def __init__(self):
super(SendSMSSkill, self).__init__(name="SendSMSSkill")
self.contacts = {'jonathan': '12345678', 'ryan': '23456789', 'sean': '34567890'} # TODO - Use API
self.contacts = {'jonathan': '12345678', 'ryan': '23456789',
'sean': '34567890'} # TODO - Use API
def initialize(self):
self.load_vocab_files(join(dirname(__file__), 'vocab', 'en-us'))
prefixes = ['tell', 'text', 'message'] # TODO - i10n
self.__register_prefixed_regex(prefixes, "(?P<Contact>\w*) (?P<Message>.*)")
self.__register_prefixed_regex(
prefixes, "(?P<Contact>\w*) (?P<Message>.*)")
intent = IntentBuilder("SendSMSIntent").require("SendSMSKeyword").require("Contact").require("Message").build()
intent = IntentBuilder("SendSMSIntent").require(
"SendSMSKeyword").require("Contact").require("Message").build()
self.register_intent(intent, self.handle_intent)
def __register_prefixed_regex(self, prefixes, suffix_regex):
@ -53,7 +58,11 @@ class SendSMSSkill(MycroftSkill):
subprocess.call(cmd)
def __notify(self, contact, number, msg):
self.emitter.emit(Message("send_sms", metadata={'contact': contact, 'number': number, 'message': msg}))
self.emitter.emit(
Message(
"send_sms",
metadata={'contact': contact, 'number': number,
'message': msg}))
def stop(self):
pass

View File

@ -14,10 +14,12 @@ class SpellingSkill(MycroftSkill):
def initialize(self):
self.load_vocab_files(join(dirname(__file__), 'vocab', 'en-us'))
prefixes = ['spell', 'spell the word', 'spelling of', 'spelling of the word']
prefixes = [
'spell', 'spell the word', 'spelling of', 'spelling of the word']
self.__register_prefixed_regex(prefixes, "(?P<Word>\w+)")
intent = IntentBuilder("SpellingIntent").require("SpellingKeyword").require("Word").build()
intent = IntentBuilder("SpellingIntent").require(
"SpellingKeyword").require("Word").build()
self.register_intent(intent, self.handle_intent)
def __register_prefixed_regex(self, prefixes, suffix_regex):

View File

@ -27,14 +27,17 @@ class AbstractTimeRules(object):
regex = regex.replace('<week_days>', self.rules.get('week_days'))
regex = regex.replace('<months>', self.rules.get('months'))
regex = regex.replace('<mealtimes>', self.rules.get('mealtimes'))
regex = regex.replace('<celebrations>', self.rules.get('celebrations'))
regex = regex.replace('<repeat_time_regex>', self.rules.get('repeat_time_regex'))
regex = regex.replace(
'<celebrations>', self.rules.get('celebrations'))
regex = regex.replace(
'<repeat_time_regex>', self.rules.get('repeat_time_regex'))
self.rules.get('time_regex')[idx] = regex.lower()
# days is an array starting from Monday (0) to Sunday (6)
def get_week_days(self, sentence):
days = None
pattern = re.compile(self.rules.get('repeat_time_regex'), re.IGNORECASE)
pattern = re.compile(
self.rules.get('repeat_time_regex'), re.IGNORECASE)
result = pattern.search(sentence)
if result:
group = result.group()
@ -63,19 +66,30 @@ class TimeRulesEnUs(AbstractTimeRules):
'time_advs': 'today|tonight|tomorrow',
'time_units': 'second|minute|hour|day|week|month|year',
'day_parts': 'dawn|morning|noon|afternoon|evening|night|midnight',
'week_days': 'monday|tuesday|wednesday|thursday|friday|saturday|sunday',
'months': 'january|february|march|april|may|june|july|august|october|september|november|december',
'mealtimes': 'breakfast|lunchtime|teatime|dinnertime|lunch time|tea time|dinner time',
'week_days': (
'monday|tuesday|wednesday|thursday|friday|saturday|sunday'),
'months': (
'january|february|march|april|may|june|july|august|october|'
'september|november|december'),
'mealtimes': (
'breakfast|lunchtime|teatime|dinnertime|lunch time|tea time|'
'dinner time'),
'celebrations': 'easter|christmas',
'repeat_time_regex':
'((every|each|all) (single )?(day|(<week_days>)s?( (and )?(<week_days>)s?)*))|daily|everyday',
'repeat_time_regex': (
'((every|each|all) (single )?(day|(<week_days>)s?( (and )?'
'(<week_days>)s?)*))|daily|everyday'),
'time_regex': [
'(<time_advs>)', '((at|in the|during the|tomorrow)\s)?(<day_parts>)',
'(in )?(a|an|one|two|\d+\.?\d*) (<time_units>)s?( later)?', 'on (<week_days>)',
'(on|the) (\d+(rd|st|nd|th)?\s)?(<months>)( the )?(\s\d+(rd|st|nd|th)?)?(\s?,?\s?\d*)?',
'(<time_advs>)',
'((at|in the|during the|tomorrow)\s)?(<day_parts>)',
'(in )?(a|an|one|two|\d+\.?\d*) (<time_units>)s?( later)?',
'on (<week_days>)',
'(on|the) (\d+(rd|st|nd|th)?\s)?(<months>)( the )?'
'(\s\d+(rd|st|nd|th)?)?(\s?,?\s?\d*)?',
'in (\d\d\d\d)', 'at (<mealtimes>|<celebrations>)',
"(at|by) \d+((:| and )\d+)?( and a (quarter|half))?\s?((a\.?m\.?|p\.?m\.?)|o'clock)?",
'(in |in the )?next (<time_units>|<day_parts>|<week_days>|<months>|<mealtimes>|<celebrations>)s?',
"(at|by) \d+((:| and )\d+)?( and a (quarter|half))?"
"\s?((a\.?m\.?|p\.?m\.?)|o'clock)?",
'(in |in the )?next (<time_units>|<day_parts>|<week_days>'
'|<months>|<mealtimes>|<celebrations>)s?',
'<repeat_time_regex>'
]
}
@ -83,7 +97,8 @@ class TimeRulesEnUs(AbstractTimeRules):
def build_repeat_time_regex(self):
    """Expand the ``<week_days>`` placeholder in the repeat-time regex.

    Replaces it in place (``self.rules['repeat_time_regex']``) with the
    configured week-day alternation so the pattern can be compiled later.
    """
    days = self.rules.get('week_days')
    template = self.rules.get('repeat_time_regex')
    self.rules['repeat_time_regex'] = template.replace(
        '<week_days>', days)
def is_all_days(self, group):
for d in [' day', 'daily', 'everyday']:

View File

@ -12,7 +12,8 @@ LOGGER = getLogger(__name__)
class VolumeSkill(MycroftSkill):
VOLUMES = {0: 0, 1: 15, 2: 25, 3: 35, 4: 45, 5: 55, 6: 65, 7: 70, 8: 80, 9: 90, 10: 95, 11: 100}
VOLUMES = {0: 0, 1: 15, 2: 25, 3: 35, 4: 45, 5: 55, 6: 65, 7: 70, 8: 80,
9: 90, 10: 95, 11: 100}
def __init__(self):
super(VolumeSkill, self).__init__(name="VolumeSkill")
@ -24,19 +25,24 @@ class VolumeSkill(MycroftSkill):
self.__build_set_volume()
def __build_set_volume(self):
    """Register every volume-control intent with its handler.

    Covers set (explicit amount), increase, decrease, mute and reset.
    """
    intent = IntentBuilder("SetVolumeIntent").require(
        "VolumeKeyword").require("VolumeAmount").build()
    self.register_intent(intent, self.handle_set_volume)

    intent = IntentBuilder("IncreaseVolumeIntent").require(
        "IncreaseVolumeKeyword").build()
    self.register_intent(intent, self.handle_increase_volume)

    intent = IntentBuilder("DecreaseVolumeIntent").require(
        "DecreaseVolumeKeyword").build()
    self.register_intent(intent, self.handle_decrease_volume)

    intent = IntentBuilder("MuteVolumeIntent").require(
        "MuteVolumeKeyword").build()
    self.register_intent(intent, self.handle_mute_volume)

    intent = IntentBuilder("ResetVolumeIntent").require(
        "ResetVolumeKeyword").build()
    self.register_intent(intent, self.handle_reset_volume)
def handle_set_volume(self, message):
@ -60,7 +66,9 @@ class VolumeSkill(MycroftSkill):
def handle_reset_volume(self, message):
Mixer().setvolume(self.default_volume)
self.speak_dialog('reset.volume', data={'volume': self.get_volume_code(self.default_volume)})
self.speak_dialog(
'reset.volume',
data={'volume': self.get_volume_code(self.default_volume)})
def __update_volume(self, level=0):
mixer = Mixer()

View File

@ -19,7 +19,8 @@ class WeatherSkill(MycroftSkill):
@property
def owm(self):
    # Build an OWM client on demand, using the configured API key and
    # this device's identity.
    return OWM(API_key=self.config.get('api_key', ''),
               identity=IdentityManager().get())
def initialize(self):
self.load_data_files(dirname(__file__))
@ -30,16 +31,19 @@ class WeatherSkill(MycroftSkill):
self.__build_next_day_intent()
def __build_current_intent(self):
intent = IntentBuilder("CurrentWeatherIntent").require("WeatherKeyword").optionally("Location").build()
intent = IntentBuilder("CurrentWeatherIntent").require(
"WeatherKeyword").optionally("Location").build()
self.register_intent(intent, self.handle_current_intent)
def __build_next_hour_intent(self):
intent = IntentBuilder("NextHoursWeatherIntent").require("WeatherKeyword").optionally("Location") \
intent = IntentBuilder("NextHoursWeatherIntent").require(
"WeatherKeyword").optionally("Location") \
.require("NextHours").build()
self.register_intent(intent, self.handle_next_hour_intent)
def __build_next_day_intent(self):
intent = IntentBuilder("NextDayWeatherIntent").require("WeatherKeyword").optionally("Location") \
intent = IntentBuilder("NextDayWeatherIntent").require(
"WeatherKeyword").optionally("Location") \
.require("NextDay").build()
self.register_intent(intent, self.handle_next_day_intent)
@ -66,7 +70,8 @@ class WeatherSkill(MycroftSkill):
def handle_next_hour_intent(self, message):
try:
location = message.metadata.get("Location", self.location)
weather = self.owm.three_hours_forecast(location).get_forecast().get_weathers()[0]
weather = self.owm.three_hours_forecast(
location).get_forecast().get_weathers()[0]
data = self.__build_data_condition(location, weather)
self.speak_dialog('hour.weather', data)
except APICallError as e:
@ -77,15 +82,19 @@ class WeatherSkill(MycroftSkill):
def handle_next_day_intent(self, message):
    """Speak tomorrow's forecast for the requested (or default) location.

    Reads entry [1] of the daily forecast (presumably [0] is today —
    TODO confirm against pyowm's daily_forecast contract) and reports
    the day temperature plus min/max.
    """
    try:
        location = message.metadata.get("Location", self.location)
        weather = self.owm.daily_forecast(
            location).get_forecast().get_weathers()[1]
        data = self.__build_data_condition(
            location, weather, 'day', 'min', 'max')
        self.speak_dialog('tomorrow.weather', data)
    except APICallError as e:
        # Remote OWM API failure: report through the skill's error dialog.
        self.__api_error(e)
    except Exception as e:
        # Anything else (bad payload, missing fields): log and stay quiet.
        LOGGER.error("Error: {0}".format(e))
def __build_data_condition(self, location, weather, temp='temp', temp_min='temp_min', temp_max='temp_max'):
def __build_data_condition(
self, location, weather, temp='temp', temp_min='temp_min',
temp_max='temp_max'):
data = {
'location': location,
'scale': self.temperature,

View File

@ -32,16 +32,18 @@ def OWM(API_key=None, version=constants.LATEST_OWM_API_VERSION,
support you are currently requesting. Please be aware that malformed
user-defined configuration modules can lead to unwanted behaviour!
:type config_module: str (eg: 'mypackage.mysubpackage.myconfigmodule')
:param language: the language in which you want text results to be returned.
It's a two-characters string, eg: "en", "ru", "it". Defaults to:
``None``, which means use the default language.
:param language: the language in which you want text results to be
returned. It's a two-character string, eg: "en", "ru", "it". Defaults
to: ``None``, which means use the default language.
:type language: str
:returns: an instance of a proper *OWM* subclass
:raises: *ValueError* when unsupported OWM API versions are provided
"""
if version == "2.5":
if config_module is None:
config_module = "mycroft.skills.weather.owm_repackaged.configuration25_mycroft"
config_module = (
"mycroft.skills.weather.owm_repackaged."
"configuration25_mycroft")
cfg_module = __import__(config_module, fromlist=[''])
from mycroft.skills.weather.owm_repackaged.owm25 import OWM25
if language is None:

View File

@ -62,8 +62,7 @@ weather_code_registry = weathercoderegistry.WeatherCodeRegistry({
"rain": [{
"start": 500,
"end": 531
},
{
}, {
"start": 300,
"end": 321
}],
@ -94,24 +93,21 @@ weather_code_registry = weathercoderegistry.WeatherCodeRegistry({
"tornado": [{
"start": 781,
"end": 781
},
{
}, {
"start": 900,
"end": 900
}],
"storm": [{
"start": 901,
"end": 901
},
{
}, {
"start": 960,
"end": 961
}],
"hurricane": [{
"start": 902,
"end": 902
},
{
}, {
"start": 962,
"end": 962
}]

View File

@ -32,8 +32,9 @@ class OWM25(owm.OWM):
:param cache: a concrete implementation of class *OWMCache* serving as the
cache provider (defaults to a *NullCache* instance)
:type cache: an *OWMCache* concrete instance
:param language: the language in which you want text results to be returned.
It's a two-characters string, eg: "en", "ru", "it". Defaults to: "en"
:param language: the language in which you want text results to be
returned. It's a two-character string, eg: "en", "ru", "it". Defaults
to: "en"
:type language: str
:returns: an *OWM25* instance
@ -44,14 +45,16 @@ class OWM25(owm.OWM):
if API_key is not None:
OWM25._assert_is_string("API_key", API_key)
self._API_key = API_key
self._httpclient = owmhttpclient.OWMHTTPClient(API_key, cache, identity)
self._httpclient = owmhttpclient.OWMHTTPClient(
API_key, cache, identity)
self._language = language
@staticmethod
def _assert_is_string(name, value):
    """Raise ``AssertionError`` if *value* is not a string.

    Works on both Python 2 (``basestring``) and Python 3 (``str``):
    evaluating ``basestring`` raises ``NameError`` on Python 3, which
    routes us to the ``str`` check instead.
    """
    try:
        # Python 2.x -- NOTE: this must NOT be written as
        # assert(cond, msg): that asserts a non-empty tuple, which is
        # always truthy and silently disables the check.
        assert isinstance(value, basestring), \
            "'%s' must be a str" % (name,)
    except NameError:
        # Python 3.x
        assert isinstance(value, str), "'%s' must be a str" % (name,)
@ -114,8 +117,8 @@ class OWM25(owm.OWM):
def city_id_registry(self):
"""
Gives the *CityIDRegistry* singleton instance that can be used to lookup
for city IDs.
Gives the *CityIDRegistry* singleton instance that can be used to
look up city IDs.
:returns: a *CityIDRegistry* instance
"""
@ -149,8 +152,9 @@ class OWM25(owm.OWM):
reached
"""
OWM25._assert_is_string("name", name)
json_data = self._httpclient.call_API(OBSERVATION_URL,
{'q': name,'lang': self._language})
json_data = self._httpclient.call_API(
OBSERVATION_URL,
{'q': name, 'lang': self._language})
return self._parsers['observation'].parse_JSON(json_data)
def weather_at_coords(self, lat, lon):
@ -240,8 +244,8 @@ class OWM25(owm.OWM):
def weather_at_station(self, station_id):
"""
Queries the OWM web API for the weather currently observed by a specific
meteostation (eg: 29584)
Queries the OWM web API for the weather currently observed by a
specific meteostation (eg: 29584)
:param station_id: the meteostation ID
:type station_id: int
@ -272,8 +276,8 @@ class OWM25(owm.OWM):
:param lon_top_left: longitude for top-left of bounding box
must be between -180.0 and 180.0
:type lon_top_left: int/float
:param lat_bottom_right: latitude for bottom-right of bounding box, must
be between -90.0 and 90.0
:param lat_bottom_right: latitude for bottom-right of bounding box,
must be between -90.0 and 90.0
:type lat_bottom_right: int/float
:param lon_bottom_right: longitude for bottom-right of bounding box,
must be between -180.0 and 180.0
@ -291,34 +295,34 @@ class OWM25(owm.OWM):
negative values are provided for limit
"""
assert type(lat_top_left) in (float, int), \
"'lat_top_left' must be a float"
"'lat_top_left' must be a float"
assert type(lon_top_left) in (float, int), \
"'lon_top_left' must be a float"
"'lon_top_left' must be a float"
assert type(lat_bottom_right) in (float, int), \
"'lat_bottom_right' must be a float"
"'lat_bottom_right' must be a float"
assert type(lon_bottom_right) in (float, int), \
"'lon_bottom_right' must be a float"
"'lon_bottom_right' must be a float"
assert type(cluster) is bool, "'cluster' must be a bool"
assert type(limit) in (int, type(None)), \
"'limit' must be an int or None"
"'limit' must be an int or None"
if lat_top_left < -90.0 or lat_top_left > 90.0:
raise ValueError("'lat_top_left' value must be between -90 and 90")
if lon_top_left < -180.0 or lon_top_left > 180.0:
raise ValueError("'lon_top_left' value must be between -180 and" \
+" 180")
raise ValueError("'lon_top_left' value must be between -180 and" +
" 180")
if lat_bottom_right < -90.0 or lat_bottom_right > 90.0:
raise ValueError("'lat_bottom_right' value must be between -90" \
+" and 90")
raise ValueError("'lat_bottom_right' value must be between -90" +
" and 90")
if lon_bottom_right < -180.0 or lon_bottom_right > 180.0:
raise ValueError("'lon_bottom_right' value must be between -180 "\
+"and 180")
raise ValueError("'lon_bottom_right' value must be between -180 " +
"and 180")
if limit is not None and limit < 1:
raise ValueError("'limit' must be None or greater than zero")
params = {'bbox': ','.join([str(lon_top_left),
str(lat_top_left),
str(lon_bottom_right),
str(lat_bottom_right)]),
'cluster': 'yes' if cluster else 'no',}
'cluster': 'yes' if cluster else 'no', }
if limit is not None:
params['cnt'] = limit
@ -376,8 +380,9 @@ class OWM25(owm.OWM):
reached
"""
OWM25._assert_is_string("name", name)
json_data = self._httpclient.call_API(THREE_HOURS_FORECAST_URL,
{'q': name, 'lang': self._language})
json_data = self._httpclient.call_API(
THREE_HOURS_FORECAST_URL,
{'q': name, 'lang': self._language})
forecast = self._parsers['forecast'].parse_JSON(json_data)
if forecast is not None:
forecast.set_interval("3h")
@ -487,7 +492,8 @@ class OWM25(owm.OWM):
def daily_forecast_at_coords(self, lat, lon, limit=None):
"""
Queries the OWM web API for daily weather forecast for the specified
geographic coordinate (eg: latitude: 51.5073509, longitude: -0.1277583).
geographic coordinate (eg: latitude: 51.5073509,
longitude: -0.1277583).
A *Forecaster* object is returned, containing a *Forecast* instance
covering a global streak of fourteen days by default: this instance
encapsulates *Weather* objects, with a time interval of one day one
@ -567,7 +573,6 @@ class OWM25(owm.OWM):
else:
return None
def weather_history_at_place(self, name, start=None, end=None):
"""
Queries the OWM web API for weather history for the specified location
@ -602,16 +607,16 @@ class OWM25(owm.OWM):
unix_start = timeformatutils.to_UNIXtime(start)
unix_end = timeformatutils.to_UNIXtime(end)
if unix_start >= unix_end:
raise ValueError("Error: the start time boundary must " \
raise ValueError("Error: the start time boundary must "
"precede the end time!")
current_time = time()
if unix_start > current_time:
raise ValueError("Error: the start time boundary must " \
raise ValueError("Error: the start time boundary must "
"precede the current time!")
params['start'] = str(unix_start)
params['end'] = str(unix_end)
else:
raise ValueError("Error: one of the time boundaries is None, " \
raise ValueError("Error: one of the time boundaries is None, "
"while the other is not!")
json_data = self._httpclient.call_API(CITY_WEATHER_HISTORY_URL,
params)
@ -653,16 +658,16 @@ class OWM25(owm.OWM):
unix_start = timeformatutils.to_UNIXtime(start)
unix_end = timeformatutils.to_UNIXtime(end)
if unix_start >= unix_end:
raise ValueError("Error: the start time boundary must " \
raise ValueError("Error: the start time boundary must "
"precede the end time!")
current_time = time()
if unix_start > current_time:
raise ValueError("Error: the start time boundary must " \
raise ValueError("Error: the start time boundary must "
"precede the current time!")
params['start'] = str(unix_start)
params['end'] = str(unix_end)
else:
raise ValueError("Error: one of the time boundaries is None, " \
raise ValueError("Error: one of the time boundaries is None, "
"while the other is not!")
json_data = self._httpclient.call_API(CITY_WEATHER_HISTORY_URL,
params)
@ -824,8 +829,9 @@ class OWM25(owm.OWM):
def __repr__(self):
    # Debug-friendly summary of the client's configuration.
    return "<%s.%s - API key=%s, OWM web API version=%s, " \
           "PyOWM version=%s, language=%s>" % (
               __name__,
               self.__class__.__name__,
               self._API_key, self.get_API_version(),
               self.get_version(),
               self._language)

View File

@ -57,19 +57,22 @@ class OWMHTTPClient(object):
else:
try:
if self._identity and self._identity.token:
bearer_token_header = "Bearer %s:%s" % (self._identity.device_id, self._identity.token)
bearer_token_header = "Bearer %s:%s" % (
self._identity.device_id, self._identity.token)
else:
bearer_token_header = None
try:
from urllib.request import urlopen, build_opener
opener = build_opener()
if bearer_token_header:
opener.addheaders = [('Authorization', bearer_token_header)]
opener.addheaders = [
('Authorization', bearer_token_header)]
except ImportError:
from urllib2 import urlopen, build_opener
opener = build_opener()
if bearer_token_header:
opener.addheaders = [('Authorization', bearer_token_header)]
opener.addheaders = [
('Authorization', bearer_token_header)]
response = opener.open(url, None, timeout)
except HTTPError as e:
raise api_call_error.APICallError(str(e.reason), e)

View File

@ -20,16 +20,22 @@ class WikipediaSkill(MycroftSkill):
self.max_results = int(self.config['max_results'])
self.max_phrases = int(self.config['max_phrases'])
self.question = 'Would you like to know more about ' # TODO - i10n
self.feedback_prefix = read_stripped_lines(join(dirname(__file__), 'dialog', self.lang, 'FeedbackPrefix.dialog'))
self.feedback_search = read_stripped_lines(join(dirname(__file__), 'dialog', self.lang, 'FeedbackSearch.dialog'))
self.feedback_prefix = read_stripped_lines(
join(dirname(__file__), 'dialog', self.lang,
'FeedbackPrefix.dialog'))
self.feedback_search = read_stripped_lines(
join(dirname(__file__), 'dialog', self.lang,
'FeedbackSearch.dialog'))
def initialize(self):
self.load_vocab_files(join(dirname(__file__), 'vocab', 'en-us'))
prefixes = ['wiki', 'wikipedia', 'tell me about', 'tell us about', 'who is', 'who was'] # TODO - i10n
prefixes = ['wiki', 'wikipedia', 'tell me about', 'tell us about',
'who is', 'who was'] # TODO - i10n
self.__register_prefixed_regex(prefixes, "(?P<ArticleTitle>.*)")
intent = IntentBuilder("WikipediaIntent").require("WikipediaKeyword").require("ArticleTitle").build()
intent = IntentBuilder("WikipediaIntent").require(
"WikipediaKeyword").require("ArticleTitle").build()
self.register_intent(intent, self.handle_intent)
def __register_prefixed_regex(self, prefixes, suffix_regex):
@ -41,7 +47,9 @@ class WikipediaSkill(MycroftSkill):
title = message.metadata.get("ArticleTitle")
self.__feedback_search(title)
results = wiki.search(title, self.max_results)
summary = re.sub(r'\([^)]*\)|/[^/]*/', '', wiki.summary(results[0], self.max_phrases))
summary = re.sub(
r'\([^)]*\)|/[^/]*/', '',
wiki.summary(results[0], self.max_phrases))
self.speak(summary)
except wiki.exceptions.DisambiguationError as e:
@ -55,7 +63,8 @@ class WikipediaSkill(MycroftSkill):
def __feedback_search(self, title):
prefix = self.feedback_prefix[randrange(len(self.feedback_prefix))]
feedback = self.feedback_search[randrange(len(self.feedback_search))]
sentence = feedback.replace('<prefix>', prefix).replace('<title>', title)
sentence = feedback.replace('<prefix>', prefix).replace(
'<title>', title)
self.speak(sentence)
def __ask_more_about(self, opts):

View File

@ -18,14 +18,19 @@ logger = getLogger(__name__)
class EnglishQuestionParser(object):
"""
Poor-man's english question parser. Not even close to conclusive, but appears to construct some decent w|a queries
and responses.
Poor-man's English question parser. Not even close to conclusive, but
appears to construct some decent w|a queries and responses.
"""
def __init__(self):
    """Compile the question patterns once, at construction time."""
    # Pattern 1: "<wh-word> <subject> is/are/was/were <rest>".
    # Pattern 2: generic "<wh-word> <verb> <query>".
    # Raw strings avoid the invalid "\w" escape in a plain literal.
    self.regexes = [
        re.compile(
            r".*(?P<QuestionWord>who|what|when|where|why|which) "
            r"(?P<Query1>.*) (?P<QuestionVerb>is|are|was|were) "
            r"(?P<Query2>.*)"),
        re.compile(
            r".*(?P<QuestionWord>who|what|when|where|why|which) "
            r"(?P<QuestionVerb>\w+) (?P<Query>.*)")
    ]
def _normalize(self, groupdict):
@ -35,7 +40,8 @@ class EnglishQuestionParser(object):
return {
'QuestionWord': groupdict.get('QuestionWord'),
'QuestionVerb': groupdict.get('QuestionVerb'),
'Query': ' '.join([groupdict.get('Query1'), groupdict.get('Query2')])
'Query': ' '.join([groupdict.get('Query1'), groupdict.get(
'Query2')])
}
def parse(self, utterance):
@ -85,7 +91,8 @@ class WolframAlphaSkill(MycroftSkill):
self.emitter.on('intent_failure', self.handle_fallback)
def handle_fallback(self, message):
logger.debug("Could not determine intent, falling back to WolframAlpha Skill!")
logger.debug(
"Could not determine intent, falling back to WolframAlpha Skill!")
utterance = message.metadata.get('utterance')
parsed_question = self.question_parser.parse(utterance)
@ -96,7 +103,9 @@ class WolframAlphaSkill(MycroftSkill):
self.speak("I am searching for " + utterance)
query = utterance
if parsed_question:
query = "%s %s %s" % (parsed_question.get('QuestionWord'), parsed_question.get('QuestionVerb'), parsed_question.get('Query'))
query = "%s %s %s" % (parsed_question.get('QuestionWord'),
parsed_question.get('QuestionVerb'),
parsed_question.get('Query'))
try:
res = self.client.query(query)
@ -115,7 +124,8 @@ class WolframAlphaSkill(MycroftSkill):
result = self.__find_value(res.pods, 'Value')
if not result:
result = self.__find_value(res.pods, 'DecimalApproximation')
result = self.__find_value(
res.pods, 'DecimalApproximation')
result = result[:5]
except:
pass
@ -125,7 +135,8 @@ class WolframAlphaSkill(MycroftSkill):
verb = "is"
structured_syntax_regex = re.compile(".*(\||\[|\\\\|\]).*")
if parsed_question:
if not input_interpretation or structured_syntax_regex.match(input_interpretation):
if not input_interpretation or structured_syntax_regex.match(
input_interpretation):
input_interpretation = parsed_question.get('Query')
verb = parsed_question.get('QuestionVerb')

View File

@ -12,7 +12,8 @@ class TTS(object):
"""
TTS abstract class to be implemented by all TTS engines.
It aggregates the minimum required parameters and exposes ``execute(sentence)`` function.
It aggregates the minimum required parameters and exposes
``execute(sentence)`` function.
"""
def __init__(self, lang, voice, filename='/tmp/tts.wav'):
@ -30,7 +31,8 @@ class TTSValidator(object):
"""
TTS Validator abstract class to be implemented by all TTS engines.
It exposes and implements ``validate(tts)`` function as a template to validate the TTS engines.
It exposes and implements ``validate(tts)`` function as a template to
validate the TTS engines.
"""
def __init__(self):
@ -45,16 +47,19 @@ class TTSValidator(object):
def __validate_instance(self, tts):
instance = self.get_instance()
if not isinstance(tts, instance):
raise AttributeError('tts must be instance of ' + instance.__name__)
raise AttributeError(
'tts must be instance of ' + instance.__name__)
LOGGER.debug('TTS: ' + str(instance))
def __validate_filename(self, filename):
if not (filename and filename.endswith('.wav')):
raise AttributeError('filename: ' + filename + ' must be a .wav file!')
raise AttributeError(
'filename: ' + filename + ' must be a .wav file!')
dir_path = dirname(filename)
if not (exists(dir_path) and isdir(dir_path)):
raise AttributeError('filename: ' + filename + ' is not a valid file path!')
raise AttributeError(
'filename: ' + filename + ' is not a valid file path!')
LOGGER.debug('Filename: ' + filename)
@abc.abstractmethod

View File

@ -12,7 +12,8 @@ class ESpeak(TTS):
super(ESpeak, self).__init__(lang, voice)
def execute(self, sentence):
    # Shell out to the espeak binary; the voice variant is appended to
    # the language code ("<lang>+<voice>").
    subprocess.call(
        ['espeak', '-v', self.lang + '+' + self.voice, sentence])
class ESpeakValidator(TTSValidator):
@ -27,7 +28,9 @@ class ESpeakValidator(TTSValidator):
try:
subprocess.call(['espeak', '--version'])
except:
raise Exception('ESpeak is not installed. Run on terminal: sudo apt-get install espeak')
raise Exception(
'ESpeak is not installed. Run on terminal: sudo apt-get '
'install espeak')
def get_instance(self):
return ESpeak

View File

@ -46,7 +46,9 @@ class FATTSValidator(TTSValidator):
if content['product'].find('FA-TTS') < 0:
raise Exception('Invalid FA-TTS server.')
except:
raise Exception('FA-TTS server could not be verified. Check your connection to the server: ' + tts.url)
raise Exception(
'FA-TTS server could not be verified. Check your connection '
'to the server: ' + tts.url)
def get_instance(self):
return FATTS

View File

@ -30,7 +30,9 @@ class GoogleTTSValidator(TTSValidator):
try:
gTTS(text='Hi').save(tts.filename)
except:
raise Exception('GoogleTTS server could not be verified. Please check your internet connection.')
raise Exception(
'GoogleTTS server could not be verified. Please check your '
'internet connection.')
def get_instance(self):
return GoogleTTS

View File

@ -43,7 +43,9 @@ class MaryTTSValidator(TTSValidator):
if resp.content.find('Mary TTS server') < 0:
raise Exception('Invalid MaryTTS server.')
except:
raise Exception('MaryTTS server could not be verified. Check your connection to the server: ' + tts.url)
raise Exception(
'MaryTTS server could not be verified. Check your connection '
'to the server: ' + tts.url)
def get_instance(self):
return MaryTTS

View File

@ -10,7 +10,8 @@ __author__ = 'jdorleans'
config = ConfigurationManager.get_config().get("tts", {})
NAME = 'mimic'
BIN = config.get("mimic.path", join(MYCROFT_ROOT_PATH, 'mimic', 'bin', 'mimic'))
BIN = config.get(
"mimic.path", join(MYCROFT_ROOT_PATH, 'mimic', 'bin', 'mimic'))
class Mimic(TTS):
@ -32,7 +33,9 @@ class MimicValidator(TTSValidator):
try:
subprocess.call([BIN, '--version'])
except:
raise Exception('Mimic is not installed. Make sure install-mimic.sh ran properly.')
raise Exception(
'Mimic is not installed. Make sure install-mimic.sh ran '
'properly.')
def get_instance(self):
return Mimic

View File

@ -16,7 +16,8 @@ class RemoteTTS(TTS):
"""
Abstract class for a Remote TTS engine implementation.
It provides a common logic to perform multiple requests by splitting the whole sentence into small ones.
It provides a common logic to perform multiple requests by splitting the
whole sentence into small ones.
"""
def __init__(self, lang, voice, url, api_path):
@ -49,7 +50,9 @@ class RemoteTTS(TTS):
return reqs
def __request(self, p):
return self.session.get(self.url + self.api_path, params=self.build_request_params(p), timeout=10, verify=False)
return self.session.get(
self.url + self.api_path, params=self.build_request_params(p),
timeout=10, verify=False)
@abc.abstractmethod
def build_request_params(self, sentence):
@ -61,7 +64,9 @@ class RemoteTTS(TTS):
self.__save(resp.content)
play_wav(self.filename)
else:
LOGGER.error('%s Http Error: %s for url: %s' % (resp.status_code, resp.reason, resp.url))
LOGGER.error(
'%s Http Error: %s for url: %s' %
(resp.status_code, resp.reason, resp.url))
def __save(self, data):
with open(self.filename, 'wb') as f:

View File

@ -12,7 +12,8 @@ class SpdSay(TTS):
super(SpdSay, self).__init__(lang, voice)
def execute(self, sentence):
subprocess.call(['spd-say', '-l', self.lang, '-t', self.voice, sentence])
subprocess.call(
['spd-say', '-l', self.lang, '-t', self.voice, sentence])
class SpdSayValidator(TTSValidator):
@ -27,7 +28,9 @@ class SpdSayValidator(TTSValidator):
try:
subprocess.call(['spd-say', '--version'])
except:
raise Exception('SpdSay is not installed. Run on terminal: sudo apt-get install speech-dispatcher')
raise Exception(
'SpdSay is not installed. Run on terminal: sudo apt-get '
'install speech-dispatcher')
def get_instance(self):
return SpdSay

View File

@ -15,7 +15,8 @@ def create():
"""
Factory method to create a TTS engine based on configuration.
The configuration file ``defaults.ini`` contains a ``tts`` section with the name of a TTS module to be read by this method.
The configuration file ``defaults.ini`` contains a ``tts`` section with
the name of a TTS module to be read by this method.
[tts]

View File

@ -21,9 +21,12 @@ def play_mp3(file_path):
def record(file_path, duration, rate, channels):
if duration > 0:
return subprocess.Popen(["arecord", "-r", str(rate), "-c", str(channels), "-d", str(duration), file_path])
return subprocess.Popen(
["arecord", "-r", str(rate), "-c", str(channels), "-d",
str(duration), file_path])
else:
return subprocess.Popen(["arecord", "-r", str(rate), "-c", str(channels), file_path])
return subprocess.Popen(
["arecord", "-r", str(rate), "-c", str(channels), file_path])
def remove_last_slash(url):
@ -66,5 +69,6 @@ def kill(names):
except:
pass
class CerberusAccessDenied(Exception):
pass
pass

View File

@ -6,7 +6,8 @@ import argparse
__author__ = 'seanfitz'
"""
Audio Test
A tool for recording X seconds of audio, and then playing them back. Useful for testing hardware, and ensures
A tool for recording X seconds of audio, and then playing them back. Useful
for testing hardware, and ensures
compatibility with mycroft recognizer loop code.
"""
@ -22,8 +23,12 @@ def record(filename, duration):
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--filename', dest='filename', default="/tmp/test.wav", help="Filename for saved audio (Default: /tmp/test.wav)")
parser.add_argument('-d', '--duration', dest='duration', type=int, default=10, help="Duration of recording in seconds (Default: 10)")
parser.add_argument(
'-f', '--filename', dest='filename', default="/tmp/test.wav",
help="Filename for saved audio (Default: /tmp/test.wav)")
parser.add_argument(
'-d', '--duration', dest='duration', type=int, default=10,
help="Duration of recording in seconds (Default: 10)")
args = parser.parse_args()
record(args.filename, args.duration)

View File

@ -1,6 +1,7 @@
import logging
__author__ = 'seanfitz'
import logging
FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(format=FORMAT, level=logging.DEBUG)
logger = logging.getLogger("MYCROFT")

View File

@ -8,9 +8,11 @@ __author__ = 'seanfitz'
BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
def place_manifest(manifest_file):
shutil.copy(manifest_file, "MANIFEST.in")
def get_version():
version = None
try:
@ -18,7 +20,8 @@ def get_version():
version = mycroft.__version__.version
except Exception, e:
try:
version = "dev-" + subprocess.check_output(["git", "rev-parse", "--short", "HEAD"]).strip()
version = "dev-" + subprocess.check_output(
["git", "rev-parse", "--short", "HEAD"]).strip()
except subprocess.CalledProcessError, e2:
version = "development"
@ -33,4 +36,6 @@ def required(requirements_file):
def find_all_packages(where):
packages = find_packages(where=where, exclude=["*test*"])
return [os.path.join(where, pkg.replace(".", os.sep)) for pkg in packages] + [where]
return [
os.path.join(where, pkg.replace(".", os.sep))
for pkg in packages] + [where]

View File

@ -35,4 +35,4 @@ setup(
'mycroft-skill-container=mycroft.skills.container:main'
]
}
)
)

View File

@ -1 +1,2 @@
xmlrunner==1.7.7
pep8
xmlrunner==1.7.7

View File

@ -2,8 +2,15 @@ from Queue import Queue
from os.path import dirname, join
import unittest
from speech_recognition import WavFile, AudioData
from mycroft.client.speech.listener import WakewordExtractor, AudioConsumer, RecognizerLoop
from mycroft.client.speech.recognizer_wrapper import RemoteRecognizerWrapperFactory
from mycroft.client.speech.listener import (
WakewordExtractor,
AudioConsumer,
RecognizerLoop
)
from mycroft.client.speech.recognizer_wrapper import (
RemoteRecognizerWrapperFactory
)
__author__ = 'seanfitz'
@ -34,27 +41,38 @@ class AudioConsumerTest(unittest.TestCase):
self.loop,
self.loop.wakeup_recognizer,
self.loop.ww_recognizer,
RemoteRecognizerWrapperFactory.wrap_recognizer(self.recognizer, 'google'),
RemoteRecognizerWrapperFactory.wrap_recognizer(
self.recognizer, 'google'),
self.loop.wakeup_prefixes,
self.loop.wakeup_words)
def __create_sample_from_test_file(self, sample_name):
root_dir = dirname(dirname(dirname(__file__)))
filename = join(root_dir, 'test', 'client', 'data', sample_name + '.wav')
filename = join(
root_dir, 'test', 'client', 'data', sample_name + '.wav')
wavfile = WavFile(filename)
with wavfile as source:
return AudioData(source.stream.read(), wavfile.SAMPLE_RATE, wavfile.SAMPLE_WIDTH)
return AudioData(
source.stream.read(), wavfile.SAMPLE_RATE,
wavfile.SAMPLE_WIDTH)
def test_audio_pos_front_back(self):
audio = self.__create_sample_from_test_file('mycroft_in_utterance')
self.queue.put(audio)
TRUE_POS_BEGIN = 69857 + int(WakewordExtractor.TRIM_SECONDS * audio.sample_rate * audio.sample_width)
TRUE_POS_END = 89138 - int(WakewordExtractor.TRIM_SECONDS * audio.sample_rate * audio.sample_width)
TRUE_POS_BEGIN = 69857 + int(
WakewordExtractor.TRIM_SECONDS * audio.sample_rate *
audio.sample_width)
TRUE_POS_END = 89138 - int(
WakewordExtractor.TRIM_SECONDS * audio.sample_rate *
audio.sample_width)
TOLERANCE_RANGE_FRAMES = WakewordExtractor.MAX_ERROR_SECONDS * audio.sample_rate * audio.sample_width
TOLERANCE_RANGE_FRAMES = (
WakewordExtractor.MAX_ERROR_SECONDS * audio.sample_rate *
audio.sample_width)
monitor = {}
self.recognizer.set_transcriptions(["what's the weather next week", ""])
self.recognizer.set_transcriptions(
["what's the weather next week", ""])
def wakeword_callback(message):
monitor['pos_begin'] = message.get('pos_begin')
@ -66,17 +84,22 @@ class AudioConsumerTest(unittest.TestCase):
pos_begin = monitor.get('pos_begin')
self.assertIsNotNone(pos_begin)
diff = abs(pos_begin - TRUE_POS_BEGIN)
self.assertTrue(diff <= TOLERANCE_RANGE_FRAMES, str(diff) + " is not less than " + str(TOLERANCE_RANGE_FRAMES))
self.assertTrue(
diff <= TOLERANCE_RANGE_FRAMES,
str(diff) + " is not less than " + str(TOLERANCE_RANGE_FRAMES))
pos_end = monitor.get('pos_end')
self.assertIsNotNone(pos_end)
diff = abs(pos_end - TRUE_POS_END)
self.assertTrue(diff <= TOLERANCE_RANGE_FRAMES, str(diff) + " is not less than " + str(TOLERANCE_RANGE_FRAMES))
self.assertTrue(
diff <= TOLERANCE_RANGE_FRAMES,
str(diff) + " is not less than " + str(TOLERANCE_RANGE_FRAMES))
def test_wakeword_in_beginning(self):
self.queue.put(self.__create_sample_from_test_file('mycroft'))
monitor = {}
self.recognizer.set_transcriptions(["what's the weather next week", ""])
self.recognizer.set_transcriptions([
"what's the weather next week", ""])
def callback(message):
monitor['utterances'] = message.get('utterances')
@ -91,7 +114,8 @@ class AudioConsumerTest(unittest.TestCase):
def test_wakeword_in_phrase(self):
self.queue.put(self.__create_sample_from_test_file('mycroft'))
monitor = {}
self.recognizer.set_transcriptions(["he can do other stuff too", "what's the weather in cincinnati"])
self.recognizer.set_transcriptions([
"he can do other stuff too", "what's the weather in cincinnati"])
def callback(message):
monitor['utterances'] = message.get('utterances')
@ -107,7 +131,7 @@ class AudioConsumerTest(unittest.TestCase):
def test_call_and_response(self):
self.queue.put(self.__create_sample_from_test_file('mycroft'))
monitor = {}
self.recognizer.set_transcriptions(["mycroft",""])
self.recognizer.set_transcriptions(["mycroft", ""])
def wakeword_callback(message):
monitor['wakeword'] = message.get('utterance')
@ -121,7 +145,8 @@ class AudioConsumerTest(unittest.TestCase):
self.assertIsNotNone(monitor.get('wakeword'))
self.queue.put(self.__create_sample_from_test_file('mycroft'))
self.recognizer.set_transcriptions(["what's the weather next week", ""])
self.recognizer.set_transcriptions(
["what's the weather next week", ""])
self.loop.once('recognizer_loop:utterance', utterance_callback)
self.consumer.try_consume_audio()

View File

@ -50,7 +50,9 @@ class DynamicEnergytest(unittest.TestCase):
recognizer = Recognizer()
recognizer.listen(source)
higher_base_energy = audioop.rms(higher_base, 2)
# after recalibration (because of max audio length) new threshold should be >= 1.5 * higher_base_energy
delta_below_threshold = recognizer.energy_threshold - higher_base_energy
# after recalibration (because of max audio length) new threshold
# should be >= 1.5 * higher_base_energy
delta_below_threshold = (
recognizer.energy_threshold - higher_base_energy)
min_delta = higher_base_energy * .5
assert abs(delta_below_threshold - min_delta) < 1
assert abs(delta_below_threshold - min_delta) < 1

View File

@ -3,11 +3,13 @@ import os
from mycroft.client.speech import wakeword_recognizer
__author__ = 'seanfitz'
import unittest
DATA_DIR=os.path.join(os.path.abspath(os.path.dirname(__file__)), "data")
__author__ = 'seanfitz'
DATA_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), "data")
class WakewordRecognizerTest(unittest.TestCase):
def setUp(self):

View File

@ -10,10 +10,17 @@ PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
def discover_tests():
tests = {}
skills = [skill for skill in glob.glob(os.path.join(PROJECT_ROOT, 'mycroft/skills/*')) if os.path.isdir(skill)]
skills = [
skill for skill
in glob.glob(os.path.join(PROJECT_ROOT, 'mycroft/skills/*'))
if os.path.isdir(skill)
]
for skill in skills:
test_intent_files = [f for f in glob.glob(os.path.join(skill, 'test/intent/*.intent.json'))]
test_intent_files = [
f for f
in glob.glob(os.path.join(skill, 'test/intent/*.intent.json'))
]
if len(test_intent_files) > 0:
tests[skill] = test_intent_files
@ -31,21 +38,20 @@ class IntentTestSequenceMeta(type):
for skill in tests.keys():
skill_name = os.path.basename(skill)
for example in tests[skill]:
example_name = os.path.basename(os.path.splitext(os.path.splitext(example)[0])[0])
test_name = "test_IntentValidation[%s:%s]" % (skill_name, example_name)
example_name = os.path.basename(
os.path.splitext(os.path.splitext(example)[0])[0])
test_name = "test_IntentValidation[%s:%s]" % (skill_name,
example_name)
d[test_name] = gen_test(skill, example)
return type.__new__(mcs, name, bases, d)
class IntentTestSequence(unittest.TestCase):
__metaclass__ = IntentTestSequenceMeta
def setUp(self):
self.emitter = MockSkillsLoader(os.path.join(PROJECT_ROOT, 'mycroft', 'skills')).load_skills()
self.emitter = MockSkillsLoader(
os.path.join(PROJECT_ROOT, 'mycroft', 'skills')).load_skills()
if __name__ == '__main__':
unittest.main()

View File

@ -12,7 +12,11 @@ class RegistrationOnlyEmitter(object):
self.emitter = EventEmitter()
def on(self, event, f):
if event in ['register_intent', 'register_vocab', 'recognizer_loop:utterance']:
if event in [
'register_intent',
'register_vocab',
'recognizer_loop:utterance'
]:
self.emitter.on(event, f)
def emit(self, event, *args, **kwargs):
@ -27,7 +31,7 @@ class MockSkillsLoader(object):
def load_skills(self):
load_skills(self.emitter, self.skills_root)
return self.emitter.emitter # kick out the underlying emitter
return self.emitter.emitter # kick out the underlying emitter
class SkillTest(object):
@ -40,7 +44,9 @@ class SkillTest(object):
def compare_intents(self, expected, actual):
for key in expected.keys():
if actual.get(key, "").lower() != expected.get(key, "").lower():
print "Expected %s: %s, Actual: %s" % (key, expected.get(key), actual.get(key))
print(
"Expected %s: %s, Actual: %s" % (key, expected.get(key),
actual.get(key)))
assert False
def run(self):
@ -51,8 +57,9 @@ class SkillTest(object):
self.compare_intents(example_json.get('intent'), intent.metadata)
self.returned_intent = True
self.emitter.once(example_json.get('intent_type'), compare)
self.emitter.emit('recognizer_loop:utterance', Message('recognizer_loop:utterance', event))
self.emitter.emit(
'recognizer_loop:utterance',
Message('recognizer_loop:utterance', event))
if not self.returned_intent:
print("No intent handled")
assert False

View File

@ -1,6 +1,5 @@
from mycroft.configuration.config import ConfigurationManager
__author__ = 'seanfitz'
import unittest
from xmlrunner import XMLTestRunner
@ -8,6 +7,9 @@ import os
import sys
__author__ = 'seanfitz'
TEST_DIR = os.path.dirname(os.path.realpath(__file__))
OUTPUT_DIR = os.path.dirname(os.path.dirname(__file__))
@ -18,4 +20,4 @@ tests = loader.discover(TEST_DIR, pattern="*_test*.py")
runner = XMLTestRunner(output="./build/report/tests")
result = runner.run(tests)
if fail_on_error and len(result.failures + result.errors) > 0:
sys.exit(1)
sys.exit(1)