Switch to official ollama library, update models (#122471)

* Switch to mainstream ollama library, update models

* Fix mypy error
Michael Hansen 2024-07-23 10:57:54 -05:00 committed by GitHub
parent d7b0d1a50e
commit 3ba2a0518e
5 changed files with 35 additions and 14 deletions


@@ -86,41 +86,60 @@ MAX_HISTORY_SECONDS = 60 * 60 # 1 hour
MODEL_NAMES = [ # https://ollama.com/library
"alfred",
"all-minilm",
"aya",
"bakllava",
"codebooga",
"codegeex4",
"codegemma",
"codellama",
"codeqwen",
"codestral",
"codeup",
"command-r",
"command-r-plus",
"dbrx",
"deepseek-coder",
"deepseek-coder-v2",
"deepseek-llm",
"deepseek-v2",
"dolphincoder",
"dolphin-llama3",
"dolphin-mistral",
"dolphin-mixtral",
"dolphin-phi",
"dolphincoder",
"duckdb-nsql",
"everythinglm",
"falcon",
"falcon2",
"firefunction-v2",
"gemma",
"gemma2",
"glm4",
"goliath",
"llama-pro",
"granite-code",
"internlm2",
"llama2",
"llama2-chinese",
"llama2-uncensored",
"llama3",
"llama3-chatqa",
"llama3-gradient",
"llama3-groq-tool-use",
"llama-pro",
"llava",
"llava-llama3",
"llava-phi3",
"magicoder",
"mathstral",
"meditron",
"medllama2",
"megadolphin",
"mistral",
"mistral-openorca",
"mistrallite",
"mistral-nemo",
"mistral-openorca",
"mixtral",
"moondream",
"mxbai-embed-large",
"neural-chat",
"nexusraven",
@@ -130,36 +149,38 @@ MODEL_NAMES = [ # https://ollama.com/library
"nous-hermes",
"nous-hermes2",
"nous-hermes2-mixtral",
"open-orca-platypus2",
"nuextract",
"openchat",
"openhermes",
"orca-mini",
"open-orca-platypus2",
"orca2",
"orca-mini",
"phi",
"phi3",
"phind-codellama",
"qwen",
"qwen2",
"samantha-mistral",
"snowflake-arctic-embed",
"solar",
"sqlcoder",
"stable-beluga",
"stable-code",
"stablelm-zephyr",
"stablelm2",
"stablelm-zephyr",
"starcoder",
"starcoder2",
"starling-lm",
"tinydolphin",
"tinyllama",
"vicuna",
"wizardcoder",
"wizardlm",
"wizardlm2",
"wizardlm-uncensored",
"wizard-math",
"wizard-vicuna",
"wizard-vicuna-uncensored",
"wizardcoder",
"wizardlm",
"wizardlm-uncensored",
"wizardlm2",
"xwinlm",
"yarn-llama2",
"yarn-mistral",


@@ -8,5 +8,5 @@
"documentation": "https://www.home-assistant.io/integrations/ollama",
"integration_type": "service",
"iot_class": "local_polling",
"requirements": ["ollama-hass==0.1.7"]
"requirements": ["ollama==0.3.0"]
}
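
(Context, not part of the diff: the requirement swap above replaces the ollama-hass package with the official ollama Python client. A minimal sketch of how that client is typically driven asynchronously; the host URL and model name below are illustrative assumptions, not values taken from this change.)

    import asyncio

    from ollama import AsyncClient


    async def main() -> None:
        # Assumed local Ollama server on its default port; adjust as needed.
        client = AsyncClient(host="http://127.0.0.1:11434")
        response = await client.chat(
            model="llama3",  # any locally pulled model, e.g. one from MODEL_NAMES
            messages=[{"role": "user", "content": "Say hello"}],
        )
        print(response["message"]["content"])


    asyncio.run(main())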


@@ -29,7 +29,7 @@ class MessageHistory:
    @property
    def num_user_messages(self) -> int:
        """Return a count of user messages."""
-       return sum(m["role"] == MessageRole.USER for m in self.messages)
+       return sum(m["role"] == MessageRole.USER.value for m in self.messages)
@dataclass(frozen=True)
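
(Context, not part of the diff: switching the comparison to MessageRole.USER.value is presumably the mypy fix noted in the commit message. A standalone sketch of the idea, using a stand-in enum rather than the integration's actual MessageRole type: when the message dicts store plain role strings, comparing against the enum's .value is the unambiguous string-to-string comparison.)

    from enum import Enum


    class Role(Enum):
        """Stand-in for the real MessageRole; values mirror ollama role strings."""

        USER = "user"
        ASSISTANT = "assistant"


    messages = [{"role": "user"}, {"role": "assistant"}, {"role": "user"}]

    # With a plain Enum, a raw string never equals the enum member itself ...
    assert sum(m["role"] == Role.USER for m in messages) == 0
    # ... but it does equal the member's stored string value.
    assert sum(m["role"] == Role.USER.value for m in messages) == 2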


@@ -1463,7 +1463,7 @@ odp-amsterdam==6.0.2
oemthermostat==1.1.1
# homeassistant.components.ollama
-ollama-hass==0.1.7
+ollama==0.3.0
# homeassistant.components.omnilogic
omnilogic==0.4.5


@@ -1199,7 +1199,7 @@ objgraph==3.5.0
odp-amsterdam==6.0.2
# homeassistant.components.ollama
-ollama-hass==0.1.7
+ollama==0.3.0
# homeassistant.components.omnilogic
omnilogic==0.4.5