"""Test Voice Assistant init."""
from dataclasses import asdict

from syrupy.assertion import SnapshotAssertion

from homeassistant.components import assist_pipeline, stt
from homeassistant.core import Context, HomeAssistant


async def test_pipeline_from_audio_stream(
    hass: HomeAssistant, mock_stt_provider, init_components, snapshot: SnapshotAssertion
) -> None:
    """Test creating a pipeline from an audio stream."""
    # Pipeline events are collected here via the event callback.
    captured_events = []

    async def audio_chunks():
        # Two audio chunks followed by an empty chunk signalling end-of-stream.
        yield b"part1"
        yield b"part2"
        yield b""

    await assist_pipeline.async_pipeline_from_audio_stream(
        hass,
        Context(),
        captured_events.append,
        stt.SpeechMetadata(
            language="",
            format=stt.AudioFormats.WAV,
            codec=stt.AudioCodecs.PCM,
            bit_rate=stt.AudioBitRates.BITRATE_16,
            sample_rate=stt.AudioSampleRates.SAMPLERATE_16000,
            channel=stt.AudioChannels.CHANNEL_MONO,
        ),
        audio_chunks(),
    )

    # Strip the non-deterministic timestamp from each event before comparing
    # against the stored snapshot.
    normalized = []
    for captured in captured_events:
        event_dict = asdict(captured)
        del event_dict["timestamp"]
        normalized.append(event_dict)

    assert normalized == snapshot
    # The STT provider should have received only the non-empty chunks.
    assert mock_stt_provider.received == [b"part1", b"part2"]