Add model option to speak action for ElevenLabs (#133902)

Simon 2025-01-17 09:18:07 +00:00 committed by GitHub
parent 5e0bbf65e4
commit 85b4be2f16
3 changed files with 41 additions and 4 deletions
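
This change lets a tts.speak call select the ElevenLabs model per invocation instead of always using the model configured on the config entry. A minimal sketch of such a call, mirroring the service data used in the tests below; the entity ids and the model name are illustrative placeholders, not taken from this commit, and the call assumes an async context with a hass reference:

# Sketch: calling the speak action with the new per-call "model" option.
# "tts.elevenlabs", "media_player.living_room" and "eleven_turbo_v2" are
# illustrative placeholders; only the "model" options key comes from this change.
await hass.services.async_call(
    "tts",
    "speak",
    {
        "entity_id": "tts.elevenlabs",
        "media_player_entity_id": "media_player.living_room",
        "message": "There is a person at the front door.",
        "options": {"model": "eleven_turbo_v2"},
    },
    blocking=True,
)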

homeassistant/components/elevenlabs/const.py

@@ -1,5 +1,7 @@
 """Constants for the ElevenLabs text-to-speech integration."""
 
+ATTR_MODEL = "model"
+
 CONF_VOICE = "voice"
 CONF_MODEL = "model"
 CONF_CONFIGURE_VOICE = "configure_voice"

homeassistant/components/elevenlabs/tts.py

@@ -24,6 +24,7 @@ from homeassistant.helpers.entity_platform import AddEntitiesCallback
 
 from . import ElevenLabsConfigEntry
 from .const import (
+    ATTR_MODEL,
     CONF_OPTIMIZE_LATENCY,
     CONF_SIMILARITY,
     CONF_STABILITY,
@@ -85,7 +86,7 @@ async def async_setup_entry(
 class ElevenLabsTTSEntity(TextToSpeechEntity):
     """The ElevenLabs API entity."""
 
-    _attr_supported_options = [ATTR_VOICE]
+    _attr_supported_options = [ATTR_VOICE, ATTR_MODEL]
     _attr_entity_category = EntityCategory.CONFIG
 
     def __init__(
@@ -141,13 +142,14 @@ class ElevenLabsTTSEntity(TextToSpeechEntity):
         _LOGGER.debug("Getting TTS audio for %s", message)
         _LOGGER.debug("Options: %s", options)
         voice_id = options.get(ATTR_VOICE, self._default_voice_id)
+        model = options.get(ATTR_MODEL, self._model.model_id)
         try:
             audio = await self._client.generate(
                 text=message,
                 voice=voice_id,
                 optimize_streaming_latency=self._latency,
                 voice_settings=self._voice_settings,
-                model=self._model.model_id,
+                model=model,
             )
             bytes_combined = b"".join([byte_seg async for byte_seg in audio])
         except ApiError as exc:
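
The model passed in the action's options now takes precedence; when the option is absent, options.get falls back to self._model.model_id, matching how the voice option already falls back to the default voice. A small illustration of that fallback, reusing the placeholder values from the tests below:

# Fallback sketch: "model1" stands in for the configured default
# (self._model.model_id); "model2" is a per-call override, as in the tests.
configured_default = "model1"
assert {"model": "model2"}.get("model", configured_default) == "model2"
assert {}.get("model", configured_default) == "model1"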

tests/components/elevenlabs/test_tts.py

@@ -13,6 +13,7 @@ import pytest
 
 from homeassistant.components import tts
 from homeassistant.components.elevenlabs.const import (
+    ATTR_MODEL,
     CONF_MODEL,
     CONF_OPTIMIZE_LATENCY,
     CONF_SIMILARITY,
@@ -163,6 +164,16 @@ async def mock_config_entry_setup(
 @pytest.mark.parametrize(
     ("setup", "tts_service", "service_data"),
     [
+        (
+            "mock_config_entry_setup",
+            "speak",
+            {
+                ATTR_ENTITY_ID: "tts.mock_title",
+                tts.ATTR_MEDIA_PLAYER_ENTITY_ID: "media_player.something",
+                tts.ATTR_MESSAGE: "There is a person at the front door.",
+                tts.ATTR_OPTIONS: {},
+            },
+        ),
         (
             "mock_config_entry_setup",
             "speak",
@@ -173,6 +184,26 @@ async def mock_config_entry_setup(
                 tts.ATTR_OPTIONS: {tts.ATTR_VOICE: "voice2"},
             },
         ),
+        (
+            "mock_config_entry_setup",
+            "speak",
+            {
+                ATTR_ENTITY_ID: "tts.mock_title",
+                tts.ATTR_MEDIA_PLAYER_ENTITY_ID: "media_player.something",
+                tts.ATTR_MESSAGE: "There is a person at the front door.",
+                tts.ATTR_OPTIONS: {ATTR_MODEL: "model2"},
+            },
+        ),
+        (
+            "mock_config_entry_setup",
+            "speak",
+            {
+                ATTR_ENTITY_ID: "tts.mock_title",
+                tts.ATTR_MEDIA_PLAYER_ENTITY_ID: "media_player.something",
+                tts.ATTR_MESSAGE: "There is a person at the front door.",
+                tts.ATTR_OPTIONS: {tts.ATTR_VOICE: "voice2", ATTR_MODEL: "model2"},
+            },
+        ),
     ],
     indirect=["setup"],
 )
@@ -206,11 +237,13 @@ async def test_tts_service_speak(
         await retrieve_media(hass, hass_client, calls[0].data[ATTR_MEDIA_CONTENT_ID])
         == HTTPStatus.OK
     )
+    voice_id = service_data[tts.ATTR_OPTIONS].get(tts.ATTR_VOICE, "voice1")
+    model_id = service_data[tts.ATTR_OPTIONS].get(ATTR_MODEL, "model1")
 
     tts_entity._client.generate.assert_called_once_with(
         text="There is a person at the front door.",
-        voice="voice2",
-        model="model1",
+        voice=voice_id,
+        model=model_id,
         voice_settings=tts_entity._voice_settings,
         optimize_streaming_latency=tts_entity._latency,
     )