Use Configuration of the rearch branch (#4803)

parent b1570543c8
commit 9f353f41c4

@@ -1,223 +1,263 @@
"""Configuration class to store the state of bools for different scripts access."""
|
||||
import contextlib
|
||||
import os
|
||||
import re
|
||||
from typing import List
|
||||
from typing import Dict
|
||||
|
||||
import openai
|
||||
import yaml
|
||||
from auto_gpt_plugin_template import AutoGPTPluginTemplate
|
||||
from colorama import Fore
|
||||
|
||||
import autogpt
|
||||
from autogpt.core.configuration.schema import Configurable, SystemSettings
|
||||
|
||||
AZURE_CONFIG_FILE = os.path.join(os.path.dirname(__file__), "../..", "azure.yaml")
|
||||
from typing import Optional
|
||||
|
||||
|
||||
-class Config:
-    """
-    Configuration class to store the state of bools for different scripts access.
-    """
+class ConfigSettings(SystemSettings):
+    fast_llm_model: str
+    smart_llm_model: str
+    continuous_mode: bool
+    skip_news: bool
+    workspace_path: Optional[str]
+    file_logger_path: Optional[str]
+    debug_mode: bool
+    plugins_dir: str
+    plugins_config: dict[str, str]
+    continuous_limit: int
+    speak_mode: bool
+    skip_reprompt: bool
+    allow_downloads: bool
+    exit_key: str
+    plain_output: bool
+    disabled_command_categories: list[str]
+    shell_command_control: str
+    shell_denylist: list[str]
+    shell_allowlist: list[str]
+    ai_settings_file: str
+    prompt_settings_file: str
+    embedding_model: str
+    browse_spacy_language_model: str
+    openai_api_key: Optional[str]
+    openai_organization: Optional[str]
+    temperature: float
+    use_azure: bool
+    execute_local_commands: bool
+    restrict_to_workspace: bool
+    openai_api_type: Optional[str]
+    openai_api_base: Optional[str]
+    openai_api_version: Optional[str]
+    openai_functions: bool
+    elevenlabs_api_key: Optional[str]
+    streamelements_voice: str
+    text_to_speech_provider: str
+    github_api_key: Optional[str]
+    github_username: Optional[str]
+    google_api_key: Optional[str]
+    google_custom_search_engine_id: Optional[str]
+    image_provider: Optional[str]
+    image_size: int
+    huggingface_api_token: Optional[str]
+    huggingface_image_model: str
+    audio_to_text_provider: str
+    huggingface_audio_to_text_model: Optional[str]
+    sd_webui_url: Optional[str]
+    sd_webui_auth: Optional[str]
+    selenium_web_browser: str
+    selenium_headless: bool
+    user_agent: str
+    memory_backend: str
+    memory_index: str
+    redis_host: str
+    redis_port: int
+    redis_password: str
+    wipe_redis_on_start: bool
+    plugins_allowlist: list[str]
+    plugins_denylist: list[str]
+    plugins_openai: list[str]
+    plugins_config_file: str
+    chat_messages_enabled: bool
+    elevenlabs_voice_id: Optional[str]
+    plugins: list[str]
+    authorise_key: str
+
+
+class Config(Configurable):
+    default_plugins_config_file = os.path.join(
+        os.path.dirname(os.path.abspath(__file__)), "..", "..", "plugins_config.yaml"
+    )
+
+    elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY")
+    if os.getenv("USE_MAC_OS_TTS"):
+        default_tts_provider = "macos"
+    elif elevenlabs_api_key:
+        default_tts_provider = "elevenlabs"
+    elif os.getenv("USE_BRIAN_TTS"):
+        default_tts_provider = "streamelements"
+    else:
+        default_tts_provider = "gtts"
+
+    defaults_settings = ConfigSettings(
+        name="Default Server Config",
+        description="This is a default server configuration",
+        smart_llm_model="gpt-3.5-turbo",
+        fast_llm_model="gpt-3.5-turbo",
+        continuous_mode=False,
+        continuous_limit=0,
+        skip_news=False,
+        debug_mode=False,
+        plugins_dir="plugins",
+        plugins_config={},
+        speak_mode=False,
+        skip_reprompt=False,
+        allow_downloads=False,
+        exit_key="n",
+        plain_output=False,
+        disabled_command_categories=[],
+        shell_command_control="denylist",
+        shell_denylist=["sudo", "su"],
+        shell_allowlist=[],
+        ai_settings_file="ai_settings.yaml",
+        prompt_settings_file="prompt_settings.yaml",
+        embedding_model="text-embedding-ada-002",
+        browse_spacy_language_model="en_core_web_sm",
+        temperature=0,
+        use_azure=False,
+        execute_local_commands=False,
+        restrict_to_workspace=True,
+        openai_functions=False,
+        streamelements_voice="Brian",
+        text_to_speech_provider=default_tts_provider,
+        image_size=256,
+        huggingface_image_model="CompVis/stable-diffusion-v1-4",
+        audio_to_text_provider="huggingface",
+        sd_webui_url="http://localhost:7860",
+        selenium_web_browser="chrome",
+        selenium_headless=True,
+        user_agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36",
+        memory_backend="json_file",
+        memory_index="auto-gpt-memory",
+        redis_host="localhost",
+        redis_port=6379,
+        wipe_redis_on_start=True,
+        plugins_allowlist=[],
+        plugins_denylist=[],
+        plugins_openai=[],
+        plugins_config_file=default_plugins_config_file,
+        chat_messages_enabled=True,
+        plugins=[],
+        authorise_key="y",
+        redis_password="",
+    )
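
Since ConfigSettings is a pydantic model (via SystemSettings), the defaults_settings literal above is validated the moment the class body executes: a wrong type or an unknown key fails at import time rather than at first use. A minimal sketch of the same behavior, using a hypothetical cut-down model rather than the real ConfigSettings:

from typing import Optional

from pydantic import BaseModel, ValidationError


class MiniSettings(BaseModel):
    # Hypothetical stand-in for ConfigSettings; same validation semantics.
    name: str
    temperature: float
    openai_api_key: Optional[str]

    class Config:
        extra = "forbid"  # same policy SystemSettings declares


defaults = MiniSettings(name="Default Server Config", temperature=0)
print(defaults.temperature)  # 0.0 -- the int literal is coerced to float

try:
    MiniSettings(name="oops", temperature=0, unknown_key=1)
except ValidationError as err:
    print(err)  # extra="forbid" rejects keys the schema does not declare
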

-    def __init__(self) -> None:
+    @classmethod
+    def build_config_from_env(cls):
         """Initialize the Config class"""
-        self.workspace_path: str = None
-        self.file_logger_path: str = None
-
-        self.debug_mode = False
-        self.continuous_mode = False
-        self.continuous_limit = 0
-        self.speak_mode = False
-        self.skip_reprompt = False
-        self.allow_downloads = False
-        self.skip_news = False
-
-        self.authorise_key = os.getenv("AUTHORISE_COMMAND_KEY", "y")
-        self.exit_key = os.getenv("EXIT_KEY", "n")
-        self.plain_output = os.getenv("PLAIN_OUTPUT", "False") == "True"
+        config_dict = {
+            "authorise_key": os.getenv("AUTHORISE_COMMAND_KEY"),
+            "exit_key": os.getenv("EXIT_KEY"),
+            "plain_output": os.getenv("PLAIN_OUTPUT", "False") == "True",
+            "shell_command_control": os.getenv("SHELL_COMMAND_CONTROL"),
+            "ai_settings_file": os.getenv("AI_SETTINGS_FILE"),
+            "prompt_settings_file": os.getenv("PROMPT_SETTINGS_FILE"),
+            "fast_llm_model": os.getenv("FAST_LLM_MODEL"),
+            "smart_llm_model": os.getenv("SMART_LLM_MODEL"),
+            "embedding_model": os.getenv("EMBEDDING_MODEL"),
+            "browse_spacy_language_model": os.getenv("BROWSE_SPACY_LANGUAGE_MODEL"),
+            "openai_api_key": os.getenv("OPENAI_API_KEY"),
+            "use_azure": os.getenv("USE_AZURE") == "True",
+            "execute_local_commands": os.getenv("EXECUTE_LOCAL_COMMANDS", "False")
+            == "True",
+            "restrict_to_workspace": os.getenv("RESTRICT_TO_WORKSPACE", "True")
+            == "True",
+            "openai_functions": os.getenv("OPENAI_FUNCTIONS", "False") == "True",
+            "elevenlabs_api_key": os.getenv("ELEVENLABS_API_KEY"),
+            "streamelements_voice": os.getenv("STREAMELEMENTS_VOICE"),
+            "text_to_speech_provider": os.getenv("TEXT_TO_SPEECH_PROVIDER"),
+            "github_api_key": os.getenv("GITHUB_API_KEY"),
+            "github_username": os.getenv("GITHUB_USERNAME"),
+            "google_api_key": os.getenv("GOOGLE_API_KEY"),
+            "image_provider": os.getenv("IMAGE_PROVIDER"),
+            "huggingface_api_token": os.getenv("HUGGINGFACE_API_TOKEN"),
+            "huggingface_image_model": os.getenv("HUGGINGFACE_IMAGE_MODEL"),
+            "audio_to_text_provider": os.getenv("AUDIO_TO_TEXT_PROVIDER"),
+            "huggingface_audio_to_text_model": os.getenv(
+                "HUGGINGFACE_AUDIO_TO_TEXT_MODEL"
+            ),
+            "sd_webui_url": os.getenv("SD_WEBUI_URL"),
+            "sd_webui_auth": os.getenv("SD_WEBUI_AUTH"),
+            "selenium_web_browser": os.getenv("USE_WEB_BROWSER"),
+            "selenium_headless": os.getenv("HEADLESS_BROWSER", "True") == "True",
+            "user_agent": os.getenv("USER_AGENT"),
+            "memory_backend": os.getenv("MEMORY_BACKEND"),
+            "memory_index": os.getenv("MEMORY_INDEX"),
+            "redis_host": os.getenv("REDIS_HOST"),
+            "redis_password": os.getenv("REDIS_PASSWORD"),
+            "wipe_redis_on_start": os.getenv("WIPE_REDIS_ON_START", "True") == "True",
+            "plugins_dir": os.getenv("PLUGINS_DIR"),
+            "plugins_config_file": os.getenv("PLUGINS_CONFIG_FILE"),
+            "chat_messages_enabled": os.getenv("CHAT_MESSAGES_ENABLED") == "True",
+        }
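
Almost every entry in config_dict is a raw os.getenv() read, so any variable the user has not exported comes back as None. Those Nones are deliberately left in place here and stripped at the end of the method (see config_dict_without_none_values below), which is what lets defaults_settings win whenever the environment is silent. A small sketch of that pattern:

import os

# Unset variables read as None ...
config_dict = {
    "fast_llm_model": os.getenv("FAST_LLM_MODEL"),
    "exit_key": os.getenv("EXIT_KEY"),
}
# ... and the None entries are dropped before the dict is merged over the
# defaults, so an absent variable never clobbers a default value.
overrides = {k: v for k, v in config_dict.items() if v is not None}
print(overrides)  # {} when neither variable is exported
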

         # Converting to a list from comma-separated string
         disabled_command_categories = os.getenv("DISABLED_COMMAND_CATEGORIES")
         if disabled_command_categories:
-            self.disabled_command_categories = disabled_command_categories.split(",")
-        else:
-            self.disabled_command_categories = []
+            config_dict[
+                "disabled_command_categories"
+            ] = disabled_command_categories.split(",")

-        self.shell_command_control = os.getenv("SHELL_COMMAND_CONTROL", "denylist")

         # DENY_COMMANDS is deprecated and included for backwards-compatibility
         # Converting to a list from comma-separated string
         shell_denylist = os.getenv("SHELL_DENYLIST", os.getenv("DENY_COMMANDS"))
         if shell_denylist:
-            self.shell_denylist = shell_denylist.split(",")
-        else:
-            self.shell_denylist = ["sudo", "su"]
+            config_dict["shell_denylist"] = shell_denylist.split(",")

         # ALLOW_COMMANDS is deprecated and included for backwards-compatibility
         shell_allowlist = os.getenv("SHELL_ALLOWLIST", os.getenv("ALLOW_COMMANDS"))
         if shell_allowlist:
-            self.shell_allowlist = shell_allowlist.split(",")
-        else:
-            self.shell_allowlist = []
+            config_dict["shell_allowlist"] = shell_allowlist.split(",")

-        self.ai_settings_file = os.getenv("AI_SETTINGS_FILE", "ai_settings.yaml")
-        self.prompt_settings_file = os.getenv(
-            "PROMPT_SETTINGS_FILE", "prompt_settings.yaml"
-        )
-        self.fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo")
-        self.smart_llm_model = os.getenv("SMART_LLM_MODEL", "gpt-3.5-turbo")
-        self.embedding_model = os.getenv("EMBEDDING_MODEL", "text-embedding-ada-002")
-
-        self.browse_spacy_language_model = os.getenv(
-            "BROWSE_SPACY_LANGUAGE_MODEL", "en_core_web_sm"
-        )
-
-        self.openai_api_key = os.getenv("OPENAI_API_KEY")
-        self.openai_organization = os.getenv("OPENAI_ORGANIZATION")
-        self.temperature = float(os.getenv("TEMPERATURE", "0"))
-        self.use_azure = os.getenv("USE_AZURE") == "True"
-        self.execute_local_commands = (
-            os.getenv("EXECUTE_LOCAL_COMMANDS", "False") == "True"
-        )
-        self.restrict_to_workspace = (
-            os.getenv("RESTRICT_TO_WORKSPACE", "True") == "True"
-        )
-
-        if self.use_azure:
-            self.load_azure_config()
-            openai.api_type = self.openai_api_type
-            openai.api_base = self.openai_api_base
-            openai.api_version = self.openai_api_version
-        elif os.getenv("OPENAI_API_BASE_URL", None):
-            openai.api_base = os.getenv("OPENAI_API_BASE_URL")
-
-        if self.openai_organization is not None:
-            openai.organization = self.openai_organization
-
-        self.openai_functions = os.getenv("OPENAI_FUNCTIONS", "False") == "True"
-
-        self.elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY")
         # ELEVENLABS_VOICE_1_ID is deprecated and included for backwards-compatibility
-        self.elevenlabs_voice_id = os.getenv(
-            "ELEVENLABS_VOICE_ID", os.getenv("ELEVENLABS_VOICE_1_ID")
-        )
-        self.streamelements_voice = os.getenv("STREAMELEMENTS_VOICE", "Brian")
-
-        # Backwards-compatibility shim for deprecated env variables
-        if os.getenv("USE_MAC_OS_TTS"):
-            default_tts_provider = "macos"
-        elif self.elevenlabs_api_key:
-            default_tts_provider = "elevenlabs"
-        elif os.getenv("USE_BRIAN_TTS"):
-            default_tts_provider = "streamelements"
-        else:
-            default_tts_provider = "gtts"
-
-        self.text_to_speech_provider = os.getenv(
-            "TEXT_TO_SPEECH_PROVIDER", default_tts_provider
-        )
-
-        self.github_api_key = os.getenv("GITHUB_API_KEY")
-        self.github_username = os.getenv("GITHUB_USERNAME")
-
-        self.google_api_key = os.getenv("GOOGLE_API_KEY")
         # CUSTOM_SEARCH_ENGINE_ID is deprecated and included for backwards-compatibility
-        self.google_custom_search_engine_id = os.getenv(
+        config_dict["google_custom_search_engine_id"] = os.getenv(
             "GOOGLE_CUSTOM_SEARCH_ENGINE_ID", os.getenv("CUSTOM_SEARCH_ENGINE_ID")
         )

-        self.image_provider = os.getenv("IMAGE_PROVIDER")
-        self.image_size = int(os.getenv("IMAGE_SIZE", 256))
-        self.huggingface_api_token = os.getenv("HUGGINGFACE_API_TOKEN")
-        self.huggingface_image_model = os.getenv(
-            "HUGGINGFACE_IMAGE_MODEL", "CompVis/stable-diffusion-v1-4"
-        )
-        self.audio_to_text_provider = os.getenv("AUDIO_TO_TEXT_PROVIDER", "huggingface")
-        self.huggingface_audio_to_text_model = os.getenv(
-            "HUGGINGFACE_AUDIO_TO_TEXT_MODEL"
-        )
-        self.sd_webui_url = os.getenv("SD_WEBUI_URL", "http://localhost:7860")
-        self.sd_webui_auth = os.getenv("SD_WEBUI_AUTH")
-
-        # Selenium browser settings
-        self.selenium_web_browser = os.getenv("USE_WEB_BROWSER", "chrome")
-        self.selenium_headless = os.getenv("HEADLESS_BROWSER", "True") == "True"
-
-        # User agent header to use when making HTTP requests
-        # Some websites might just completely deny request with an error code if
-        # no user agent was found.
-        self.user_agent = os.getenv(
-            "USER_AGENT",
-            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36"
-            " (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36",
-        )
+        config_dict["elevenlabs_voice_id"] = os.getenv(
+            "ELEVENLABS_VOICE_ID", os.getenv("ELEVENLABS_VOICE_1_ID")
+        )

-        self.memory_backend = os.getenv("MEMORY_BACKEND", "json_file")
-        self.memory_index = os.getenv("MEMORY_INDEX", "auto-gpt-memory")
-
-        self.redis_host = os.getenv("REDIS_HOST", "localhost")
-        self.redis_port = int(os.getenv("REDIS_PORT", "6379"))
-        self.redis_password = os.getenv("REDIS_PASSWORD", "")
-        self.wipe_redis_on_start = os.getenv("WIPE_REDIS_ON_START", "True") == "True"
-
-        self.plugins_dir = os.getenv("PLUGINS_DIR", "plugins")
-        self.plugins: List[AutoGPTPluginTemplate] = []
-        self.plugins_openai = []

         # Deprecated. Kept for backwards-compatibility. Will remove in a future version.
         plugins_allowlist = os.getenv("ALLOWLISTED_PLUGINS")
         if plugins_allowlist:
-            self.plugins_allowlist = plugins_allowlist.split(",")
-        else:
-            self.plugins_allowlist = []
+            config_dict["plugins_allowlist"] = plugins_allowlist.split(",")

         # Deprecated. Kept for backwards-compatibility. Will remove in a future version.
         plugins_denylist = os.getenv("DENYLISTED_PLUGINS")
         if plugins_denylist:
-            self.plugins_denylist = plugins_denylist.split(",")
-        else:
-            self.plugins_denylist = []
+            config_dict["plugins_denylist"] = plugins_denylist.split(",")

-        # Avoid circular imports
-        from autogpt.plugins import DEFAULT_PLUGINS_CONFIG_FILE
+        with contextlib.suppress(TypeError):
+            config_dict["image_size"] = int(os.getenv("IMAGE_SIZE"))
+        with contextlib.suppress(TypeError):
+            config_dict["redis_port"] = int(os.getenv("REDIS_PORT"))
+        with contextlib.suppress(TypeError):
+            config_dict["temperature"] = float(os.getenv("TEMPERATURE"))

-        self.plugins_config_file = os.getenv(
-            "PLUGINS_CONFIG_FILE", DEFAULT_PLUGINS_CONFIG_FILE
-        )
-        self.load_plugins_config()
+        if config_dict["use_azure"]:
+            azure_config = cls.load_azure_config()
+            config_dict["openai_api_type"] = azure_config["openai_api_type"]
+            config_dict["openai_api_base"] = azure_config["openai_api_base"]
+            config_dict["openai_api_version"] = azure_config["openai_api_version"]

-        self.chat_messages_enabled = os.getenv("CHAT_MESSAGES_ENABLED") == "True"
+        if os.getenv("OPENAI_API_BASE_URL"):
+            config_dict["openai_api_base"] = os.getenv("OPENAI_API_BASE_URL")

+        openai_organization = os.getenv("OPENAI_ORGANIZATION")
+        if openai_organization is not None:
+            config_dict["openai_organization"] = openai_organization
+
+        config_dict_without_none_values = {
+            k: v for k, v in config_dict.items() if v is not None
+        }
+
+        return cls.build_agent_configuration(config_dict_without_none_values)

     def load_plugins_config(self) -> "autogpt.plugins.PluginsConfig":
         # Avoid circular import
         from autogpt.plugins.plugins_config import PluginsConfig

         self.plugins_config = PluginsConfig.load_config(global_config=self)
         return self.plugins_config

     def get_azure_deployment_id_for_model(self, model: str) -> str:
         """
         Returns the relevant deployment id for the model specified.

         Parameters:
             model(str): The model to map to the deployment id.

         Returns:
             The matching deployment id if found, otherwise an empty string.
         """
         if model == self.fast_llm_model:
             return self.azure_model_to_deployment_id_map[
                 "fast_llm_model_deployment_id"
             ]  # type: ignore
         elif model == self.smart_llm_model:
             return self.azure_model_to_deployment_id_map[
                 "smart_llm_model_deployment_id"
             ]  # type: ignore
         elif model == "text-embedding-ada-002":
             return self.azure_model_to_deployment_id_map[
                 "embedding_model_deployment_id"
             ]  # type: ignore
         else:
             return ""

+    AZURE_CONFIG_FILE = os.path.join(os.path.dirname(__file__), "../..", "azure.yaml")

-    def load_azure_config(self, config_file: str = AZURE_CONFIG_FILE) -> None:
+    @classmethod
+    def load_azure_config(cls, config_file: str = AZURE_CONFIG_FILE) -> Dict[str, str]:
         """
         Loads the configuration parameters for Azure hosting from the specified file
         path as a yaml file.

@@ -226,80 +266,20 @@ class Config:
             config_file(str): The path to the config yaml file. DEFAULT: "../azure.yaml"

         Returns:
-            None
+            Dict
         """
         with open(config_file) as file:
             config_params = yaml.load(file, Loader=yaml.FullLoader) or {}
-        self.openai_api_type = config_params.get("azure_api_type") or "azure"
-        self.openai_api_base = config_params.get("azure_api_base") or ""
-        self.openai_api_version = (
-            config_params.get("azure_api_version") or "2023-03-15-preview"
-        )
-        self.azure_model_to_deployment_id_map = config_params.get("azure_model_map", {})
-
-    def set_continuous_mode(self, value: bool) -> None:
-        """Set the continuous mode value."""
-        self.continuous_mode = value
-
-    def set_continuous_limit(self, value: int) -> None:
-        """Set the continuous limit value."""
-        self.continuous_limit = value
-
-    def set_speak_mode(self, value: bool) -> None:
-        """Set the speak mode value."""
-        self.speak_mode = value
-
-    def set_fast_llm_model(self, value: str) -> None:
-        """Set the fast LLM model value."""
-        self.fast_llm_model = value
-
-    def set_smart_llm_model(self, value: str) -> None:
-        """Set the smart LLM model value."""
-        self.smart_llm_model = value
-
-    def set_embedding_model(self, value: str) -> None:
-        """Set the model to use for creating embeddings."""
-        self.embedding_model = value
-
-    def set_openai_api_key(self, value: str) -> None:
-        """Set the OpenAI API key value."""
-        self.openai_api_key = value
-
-    def set_elevenlabs_api_key(self, value: str) -> None:
-        """Set the ElevenLabs API key value."""
-        self.elevenlabs_api_key = value
-
-    def set_elevenlabs_voice_1_id(self, value: str) -> None:
-        """Set the ElevenLabs Voice 1 ID value."""
-        self.elevenlabs_voice_id = value
-
-    def set_elevenlabs_voice_2_id(self, value: str) -> None:
-        """Set the ElevenLabs Voice 2 ID value."""
-        self.elevenlabs_voice_2_id = value
-
-    def set_google_api_key(self, value: str) -> None:
-        """Set the Google API key value."""
-        self.google_api_key = value
-
-    def set_custom_search_engine_id(self, value: str) -> None:
-        """Set the custom search engine id value."""
-        self.google_custom_search_engine_id = value
-
-    def set_debug_mode(self, value: bool) -> None:
-        """Set the debug mode value."""
-        self.debug_mode = value
-
-    def set_plugins(self, value: list) -> None:
-        """Set the plugins value."""
-        self.plugins = value
-
-    def set_temperature(self, value: int) -> None:
-        """Set the temperature value."""
-        self.temperature = value
-
-    def set_memory_backend(self, name: str) -> None:
-        """Set the memory backend name."""
-        self.memory_backend = name
+        return {
+            "openai_api_type": config_params.get("azure_api_type") or "azure",
+            "openai_api_base": config_params.get("azure_api_base") or "",
+            "openai_api_version": config_params.get("azure_api_version")
+            or "2023-03-15-preview",
+            "azure_model_to_deployment_id_map": config_params.get(
+                "azure_model_map", {}
+            ),
+        }
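
For reference, this is the shape of azure.yaml the loader above consumes: the top-level keys are the ones load_azure_config() reads, and the map keys are the ones get_azure_deployment_id_for_model() looks up. The deployment names are placeholders:

import yaml

example_azure_yaml = """
azure_api_type: azure
azure_api_base: https://my-resource.openai.azure.com
azure_api_version: 2023-03-15-preview
azure_model_map:
  fast_llm_model_deployment_id: my-gpt35-deployment
  smart_llm_model_deployment_id: my-gpt4-deployment
  embedding_model_deployment_id: my-embedding-deployment
"""
config_params = yaml.load(example_azure_yaml, Loader=yaml.FullLoader) or {}
print(config_params.get("azure_api_type") or "azure")  # "azure"
print(config_params["azure_model_map"]["fast_llm_model_deployment_id"])
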


 def check_openai_api_key(config: Config) -> None:

@@ -318,7 +298,7 @@ def check_openai_api_key(config: Config) -> None:
     openai_api_key = openai_api_key.strip()
     if re.search(key_pattern, openai_api_key):
         os.environ["OPENAI_API_KEY"] = openai_api_key
-        cfg.set_openai_api_key(openai_api_key)
+        config.openai_api_key = openai_api_key
         print(
             Fore.GREEN
             + "OpenAI API key successfully set!\n"
@@ -51,13 +51,13 @@ def create_config(
         allow_downloads (bool): Whether to allow Auto-GPT to download files natively
         skips_news (bool): Whether to suppress the output of latest news on startup
     """
-    config.set_debug_mode(False)
-    config.set_continuous_mode(False)
-    config.set_speak_mode(False)
+    config.debug_mode = False
+    config.continuous_mode = False
+    config.speak_mode = False

     if debug:
         logger.typewriter_log("Debug Mode: ", Fore.GREEN, "ENABLED")
-        config.set_debug_mode(True)
+        config.debug_mode = True

     if continuous:
         logger.typewriter_log("Continuous Mode: ", Fore.RED, "ENABLED")

@@ -68,13 +68,13 @@ def create_config(
             " cause your AI to run forever or carry out actions you would not usually"
             " authorise. Use at your own risk.",
         )
-        config.set_continuous_mode(True)
+        config.continuous_mode = True

     if continuous_limit:
         logger.typewriter_log(
             "Continuous Limit: ", Fore.GREEN, f"{continuous_limit}"
         )
-        config.set_continuous_limit(continuous_limit)
+        config.continuous_limit = continuous_limit

     # Check if continuous limit is used without continuous mode
     if continuous_limit and not continuous:

@@ -82,14 +82,14 @@ def create_config(

     if speak:
         logger.typewriter_log("Speak Mode: ", Fore.GREEN, "ENABLED")
-        config.set_speak_mode(True)
+        config.speak_mode = True

     # Set the default LLM models
     if gpt3only:
         logger.typewriter_log("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
         # --gpt3only should always use gpt-3.5-turbo, despite user's FAST_LLM_MODEL config
-        config.set_fast_llm_model(GPT_3_MODEL)
-        config.set_smart_llm_model(GPT_3_MODEL)
+        config.fast_llm_model = GPT_3_MODEL
+        config.smart_llm_model = GPT_3_MODEL

     elif (
         gpt4only

@@ -97,13 +97,11 @@ def create_config(
     ):
         logger.typewriter_log("GPT4 Only Mode: ", Fore.GREEN, "ENABLED")
         # --gpt4only should always use gpt-4, despite user's SMART_LLM_MODEL config
-        config.set_fast_llm_model(GPT_4_MODEL)
-        config.set_smart_llm_model(GPT_4_MODEL)
+        config.fast_llm_model = GPT_4_MODEL
+        config.smart_llm_model = GPT_4_MODEL
     else:
-        config.set_fast_llm_model(check_model(config.fast_llm_model, "fast_llm_model"))
-        config.set_smart_llm_model(
-            check_model(config.smart_llm_model, "smart_llm_model")
-        )
+        config.fast_llm_model = check_model(config.fast_llm_model, "fast_llm_model")
+        config.smart_llm_model = check_model(config.smart_llm_model, "smart_llm_model")

     if memory_type:
         supported_memory = get_supported_memory_backends()

@@ -0,0 +1,98 @@
+import abc
+import copy
+import typing
+from typing import Any
+
+from pydantic import BaseModel
+
+
+class SystemConfiguration(BaseModel):
+    def get_user_config(self) -> dict[str, Any]:
+        return _get_user_config_fields(self)
+
+    class Config:
+        extra = "forbid"
+        use_enum_values = True
+
+
+class SystemSettings(BaseModel, abc.ABC):
+    """A base class for all system settings."""
+
+    name: str
+    description: typing.Optional[str]
+
+    class Config:
+        extra = "forbid"
+        use_enum_values = True
+
+
+class Configurable(abc.ABC):
+    """A base class for all configurable objects."""
+
+    prefix: str = ""
+    defaults_settings: typing.ClassVar[SystemSettings]
+
+    @classmethod
+    def get_user_config(cls) -> dict[str, Any]:
+        return _get_user_config_fields(cls.defaults_settings)
+
+    @classmethod
+    def build_agent_configuration(cls, configuration: dict = {}) -> SystemSettings:
+        """Process the configuration for this object."""
+        defaults_settings = cls.defaults_settings.dict()
+        final_configuration = deep_update(defaults_settings, configuration)
+
+        return cls.defaults_settings.__class__.parse_obj(final_configuration)
+
+
+def _get_user_config_fields(instance: BaseModel) -> dict[str, Any]:
+    """
+    Get the user config fields of a Pydantic model instance.
+
+    Args:
+        instance: The Pydantic model instance.
+
+    Returns:
+        The user config fields of the instance.
+    """
+    user_config_fields = {}
+
+    for name, value in instance.__dict__.items():
+        field_info = instance.__fields__[name]
+        if "user_configurable" in field_info.field_info.extra:
+            user_config_fields[name] = value
+        elif isinstance(value, SystemConfiguration):
+            user_config_fields[name] = value.get_user_config()
+        elif isinstance(value, list) and all(
+            isinstance(i, SystemConfiguration) for i in value
+        ):
+            user_config_fields[name] = [i.get_user_config() for i in value]
+        elif isinstance(value, dict) and all(
+            isinstance(i, SystemConfiguration) for i in value.values()
+        ):
+            user_config_fields[name] = {
+                k: v.get_user_config() for k, v in value.items()
+            }
+
+    return user_config_fields
+
+
+def deep_update(original_dict: dict, update_dict: dict) -> dict:
+    """
+    Recursively update a dictionary.
+
+    Args:
+        original_dict (dict): The dictionary to be updated.
+        update_dict (dict): The dictionary to update with.
+
+    Returns:
+        dict: The updated dictionary.
+    """
+    original_dict = copy.deepcopy(original_dict)
+    for key, value in update_dict.items():
+        if (
+            key in original_dict
+            and isinstance(original_dict[key], dict)
+            and isinstance(value, dict)
+        ):
+            original_dict[key] = deep_update(original_dict[key], value)
+        else:
+            original_dict[key] = value
+    return original_dict
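
To see how these pieces compose: a Configurable subclass pins its defaults as a SystemSettings instance, and build_agent_configuration() deep-merges user overrides into those defaults before re-validating through pydantic. A minimal sketch, assuming the module above is importable as autogpt.core.configuration.schema (per the import at the top of this diff) and using illustrative class names:

from autogpt.core.configuration.schema import Configurable, SystemSettings


class ServiceSettings(SystemSettings):
    # Illustrative settings model; name/description come from SystemSettings.
    host: str
    port: int


class Service(Configurable):
    defaults_settings = ServiceSettings(
        name="example-service",
        description="illustrative defaults",
        host="localhost",
        port=8080,
    )


# deep_update() merges the override dict into a deep copy of the defaults,
# then parse_obj() re-validates the result against ServiceSettings.
settings = Service.build_agent_configuration({"port": 9000})
print(settings.host, settings.port)  # localhost 9000
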

@@ -5,7 +5,7 @@ import sys
 from colorama import Fore, Style

 from autogpt.agent import Agent
-from autogpt.config import Config, check_openai_api_key
+from autogpt.config.config import Config, check_openai_api_key
 from autogpt.configurator import create_config
 from autogpt.logs import logger
 from autogpt.memory.vector import get_memory

@@ -52,7 +52,8 @@ def run_auto_gpt(
     logger.set_level(logging.DEBUG if debug else logging.INFO)
     logger.speak_mode = speak

-    config = Config()
+    config = Config.build_config_from_env()

     # TODO: fill in llm values here
     check_openai_api_key(config)

@@ -120,7 +121,7 @@ def run_auto_gpt(
     # HACK: doing this here to collect some globals that depend on the workspace.
     Workspace.build_file_logger_path(config, workspace_directory)

-    config.set_plugins(scan_plugins(config, config.debug_mode))
+    config.plugins = scan_plugins(config, config.debug_mode)
     # Create a CommandRegistry instance and scan default folder
     command_registry = CommandRegistry()

@@ -219,8 +219,8 @@ def scan_plugins(config: Config, debug: bool = False) -> List[AutoGPTPluginTempl
     loaded_plugins = []
     # Generic plugins
-    plugins_path_path = Path(config.plugins_dir)
     plugins_config = config.plugins_config

+    # Directory-based plugins
+    for plugin_path in [f.path for f in os.scandir(config.plugins_dir) if f.is_dir()]:
         # Avoid going into __pycache__ or other hidden directories

@@ -13,9 +13,9 @@ def run_task(task) -> None:


 def bootstrap_agent(task):
-    config = Config()
-    config.set_continuous_mode(False)
-    config.set_temperature(0)
+    config = Config.build_config_from_env()
+    config.continuous_mode = False
+    config.temperature = 0
     config.plain_output = True
     command_registry = get_command_registry(config)
     config.memory_backend = "no_memory"

@@ -5,7 +5,7 @@ from autogpt.commands.file_operations import ingest_file, list_files
 from autogpt.config import Config
 from autogpt.memory.vector import VectorMemory, get_memory

-config = Config()
+config = Config.build_config_from_env()


 def configure_logging():

@@ -49,13 +49,17 @@ def temp_plugins_config_file():
 def config(
     temp_plugins_config_file: str, mocker: MockerFixture, workspace: Workspace
 ) -> Config:
-    config = Config()
+    config = Config.build_config_from_env()
     if not os.environ.get("OPENAI_API_KEY"):
         os.environ["OPENAI_API_KEY"] = "sk-dummy"

     config.plugins_dir = "tests/unit/data/test_plugins"
     config.plugins_config_file = temp_plugins_config_file
-    config.load_plugins_config()
+
+    # avoid circular dependency
+    from autogpt.plugins.plugins_config import PluginsConfig
+
+    config.plugins_config = PluginsConfig.load_config(global_config=config)

     # Do a little setup and teardown since the config object is a singleton
     mocker.patch.multiple(

@@ -95,8 +99,7 @@ def agent(config: Config, workspace: Workspace) -> Agent:

     command_registry = CommandRegistry()
     ai_config.command_registry = command_registry
-
-    config.set_memory_backend("json_file")
+    config.memory_backend = "json_file"
     memory_json_file = get_memory(config)
     memory_json_file.clear()

@@ -11,12 +11,12 @@ from autogpt.workspace import Workspace
 def memory_json_file(config: Config):
     was_memory_backend = config.memory_backend

-    config.set_memory_backend("json_file")
+    config.memory_backend = "json_file"
     memory = get_memory(config)
     memory.clear()
     yield memory

-    config.set_memory_backend(was_memory_backend)
+    config.memory_backend = was_memory_backend


 @pytest.fixture

@@ -38,7 +38,7 @@ def mock_get_embedding(mocker: MockerFixture, embedding_dimension: int):
 def memory_none(agent_test_config: Config, mock_get_embedding):
     was_memory_backend = agent_test_config.memory_backend

-    agent_test_config.set_memory_backend("no_memory")
+    agent_test_config.memory_backend = "no_memory"
     yield get_memory(agent_test_config)

-    agent_test_config.set_memory_backend(was_memory_backend)
+    agent_test_config.memory_backend = was_memory_backend

@@ -30,11 +30,11 @@ def test_set_continuous_mode(config: Config):
     # Store continuous mode to reset it after the test
     continuous_mode = config.continuous_mode

-    config.set_continuous_mode(True)
+    config.continuous_mode = True
     assert config.continuous_mode == True

     # Reset continuous mode
-    config.set_continuous_mode(continuous_mode)
+    config.continuous_mode = continuous_mode


 def test_set_speak_mode(config: Config):

@@ -44,11 +44,11 @@ def test_set_speak_mode(config: Config):
     # Store speak mode to reset it after the test
     speak_mode = config.speak_mode

-    config.set_speak_mode(True)
+    config.speak_mode = True
     assert config.speak_mode == True

     # Reset speak mode
-    config.set_speak_mode(speak_mode)
+    config.speak_mode = speak_mode


 def test_set_fast_llm_model(config: Config):

@@ -58,11 +58,11 @@ def test_set_fast_llm_model(config: Config):
     # Store model name to reset it after the test
     fast_llm_model = config.fast_llm_model

-    config.set_fast_llm_model("gpt-3.5-turbo-test")
+    config.fast_llm_model = "gpt-3.5-turbo-test"
     assert config.fast_llm_model == "gpt-3.5-turbo-test"

     # Reset model name
-    config.set_fast_llm_model(fast_llm_model)
+    config.fast_llm_model = fast_llm_model


 def test_set_smart_llm_model(config: Config):

@@ -72,11 +72,11 @@ def test_set_smart_llm_model(config: Config):
     # Store model name to reset it after the test
     smart_llm_model = config.smart_llm_model

-    config.set_smart_llm_model("gpt-4-test")
+    config.smart_llm_model = "gpt-4-test"
     assert config.smart_llm_model == "gpt-4-test"

     # Reset model name
-    config.set_smart_llm_model(smart_llm_model)
+    config.smart_llm_model = smart_llm_model


 def test_set_debug_mode(config: Config):

@@ -86,11 +86,11 @@ def test_set_debug_mode(config: Config):
     # Store debug mode to reset it after the test
     debug_mode = config.debug_mode

-    config.set_debug_mode(True)
+    config.debug_mode = True
     assert config.debug_mode == True

     # Reset debug mode
-    config.set_debug_mode(debug_mode)
+    config.debug_mode = debug_mode


 @patch("openai.Model.list")

@@ -127,22 +127,22 @@ def test_smart_and_fast_llm_models_set_to_gpt4(mock_list_models, config: Config)
     assert config.smart_llm_model == "gpt-3.5-turbo"

     # Reset config
-    config.set_fast_llm_model(fast_llm_model)
-    config.set_smart_llm_model(smart_llm_model)
+    config.fast_llm_model = fast_llm_model
+    config.smart_llm_model = smart_llm_model


 def test_missing_azure_config(config: Config, workspace: Workspace):
     config_file = workspace.get_path("azure_config.yaml")
     with pytest.raises(FileNotFoundError):
-        config.load_azure_config(str(config_file))
+        Config.load_azure_config(str(config_file))

     config_file.write_text("")
-    config.load_azure_config(str(config_file))
+    azure_config = Config.load_azure_config(str(config_file))

-    assert config.openai_api_type == "azure"
-    assert config.openai_api_base == ""
-    assert config.openai_api_version == "2023-03-15-preview"
-    assert config.azure_model_to_deployment_id_map == {}
+    assert azure_config["openai_api_type"] == "azure"
+    assert azure_config["openai_api_base"] == ""
+    assert azure_config["openai_api_version"] == "2023-03-15-preview"
+    assert azure_config["azure_model_to_deployment_id_map"] == {}


 def test_create_config_gpt4only(config: Config) -> None:

@@ -170,8 +170,8 @@ def test_create_config_gpt4only(config: Config) -> None:
     assert config.smart_llm_model == GPT_4_MODEL

     # Reset config
-    config.set_fast_llm_model(fast_llm_model)
-    config.set_smart_llm_model(smart_llm_model)
+    config.fast_llm_model = fast_llm_model
+    config.smart_llm_model = smart_llm_model


 def test_create_config_gpt3only(config: Config) -> None:

@@ -199,5 +199,5 @@ def test_create_config_gpt3only(config: Config) -> None:
     assert config.smart_llm_model == GPT_3_MODEL

     # Reset config
-    config.set_fast_llm_model(fast_llm_model)
-    config.set_smart_llm_model(smart_llm_model)
+    config.fast_llm_model = fast_llm_model
+    config.smart_llm_model = smart_llm_model

@@ -5,6 +5,7 @@ import yaml
 from autogpt.config.config import Config
 from autogpt.plugins import inspect_zip_for_modules, scan_plugins
 from autogpt.plugins.plugin_config import PluginConfig
+from autogpt.plugins.plugins_config import PluginsConfig

 PLUGINS_TEST_DIR = "tests/unit/data/test_plugins"
 PLUGIN_TEST_ZIP_FILE = "Auto-GPT-Plugin-Test-master.zip"

@@ -69,7 +70,7 @@ def test_create_base_config(config: Config):
     config.plugins_denylist = ["c", "d"]

     os.remove(config.plugins_config_file)
-    plugins_config = config.load_plugins_config()
+    plugins_config = PluginsConfig.load_config(global_config=config)

     # Check the structure of the plugins config data
     assert len(plugins_config.plugins) == 4

@@ -101,7 +102,7 @@ def test_load_config(config: Config):
     f.write(yaml.dump(test_config))

     # Load the config from disk
-    plugins_config = config.load_plugins_config()
+    plugins_config = PluginsConfig.load_config(global_config=config)

     # Check that the loaded config is equal to the test config
     assert len(plugins_config.plugins) == 2