{
  "config": {
    "step": {
      "user": {
        "data": {
          "api_key": "[%key:common::config_flow::data::api_key%]"
        }
      }
    },
    "error": {
      "cannot_connect": "[%key:common::config_flow::error::cannot_connect%]",
      "timeout_connect": "[%key:common::config_flow::error::timeout_connect%]",
      "authentication_error": "[%key:common::config_flow::error::invalid_auth%]",
      "unknown": "[%key:common::config_flow::error::unknown%]"
    },
    "abort": {
      "already_configured": "[%key:common::config_flow::abort::already_configured_service%]"
    }
  },
  "config_subentries": {
    "conversation": {
      "initiate_flow": {
        "user": "Add conversation agent",
        "reconfigure": "Reconfigure conversation agent"
      },
      "entry_type": "Conversation agent",
      "step": {
        "set_options": {
          "data": {
            "name": "[%key:common::config_flow::data::name%]",
            "prompt": "[%key:common::config_flow::data::prompt%]",
            "chat_model": "[%key:common::generic::model%]",
            "max_tokens": "Maximum tokens to return in response",
            "temperature": "Temperature",
            "llm_hass_api": "[%key:common::config_flow::data::llm_hass_api%]",
            "recommended": "Recommended model settings",
            "thinking_budget_tokens": "Thinking budget"
          },
          "data_description": {
            "prompt": "Instruct how the LLM should respond. This can be a template.",
            "thinking_budget_tokens": "The number of tokens the model can use to think about the response out of the total maximum number of tokens. Set to 1024 or greater to enable extended thinking."
          }
        }
      },
      "abort": {
        "reconfigure_successful": "[%key:common::config_flow::abort::reconfigure_successful%]",
        "entry_not_loaded": "Cannot add things while the configuration is disabled."
      },
      "error": {
        "thinking_budget_too_large": "Maximum tokens must be greater than the thinking budget."
      }
    }
  }
}