From 78734dade8a8169b097d8602e5f1f520725c0f2f Mon Sep 17 00:00:00 2001 From: Reinier van der Leer Date: Tue, 18 Apr 2023 23:40:43 +0200 Subject: [PATCH 1/3] Consolidate default config with config.py as master --- .env.template | 198 ++++++++++++++++++++------------------- autogpt/config/config.py | 4 +- 2 files changed, 102 insertions(+), 100 deletions(-) diff --git a/.env.template b/.env.template index 09deeb931..ac3efacdc 100644 --- a/.env.template +++ b/.env.template @@ -1,16 +1,20 @@ ################################################################################ ### AUTO-GPT - GENERAL SETTINGS ################################################################################ -# EXECUTE_LOCAL_COMMANDS - Allow local command execution (Example: False) -EXECUTE_LOCAL_COMMANDS=False -# BROWSE_CHUNK_MAX_LENGTH - When browsing website, define the length of chunk stored in memory -BROWSE_CHUNK_MAX_LENGTH=8192 -# USER_AGENT - Define the user-agent used by the requests library to browse website (string) +## EXECUTE_LOCAL_COMMANDS - Allow local command execution (Example: False) +# EXECUTE_LOCAL_COMMANDS=False + +## BROWSE_CHUNK_MAX_LENGTH - When browsing website, define the length of chunk stored in memory +# BROWSE_CHUNK_MAX_LENGTH=8192 + +## USER_AGENT - Define the user-agent used by the requests library to browse website (string) # USER_AGENT="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36" -# AI_SETTINGS_FILE - Specifies which AI Settings file to use (defaults to ai_settings.yaml) -AI_SETTINGS_FILE=ai_settings.yaml -# USE_WEB_BROWSER - Sets the web-browser drivers to use with selenium (defaults to chrome). -# Note: set this to either 'chrome', 'firefox', or 'safari' depending on your current browser + +## AI_SETTINGS_FILE - Specifies which AI Settings file to use (defaults to ai_settings.yaml) +# AI_SETTINGS_FILE=ai_settings.yaml + +## USE_WEB_BROWSER - Sets the web-browser drivers to use with selenium (defaults to chrome). 
+## Note: set this to either 'chrome', 'firefox', or 'safari' depending on your current browser # USE_WEB_BROWSER=chrome ################################################################################ @@ -18,156 +22,154 @@ AI_SETTINGS_FILE=ai_settings.yaml ################################################################################ ### OPENAI -# OPENAI_API_KEY - OpenAI API Key (Example: my-openai-api-key) -# TEMPERATURE - Sets temperature in OpenAI (Default: 0) -# USE_AZURE - Use Azure OpenAI or not (Default: False) +## OPENAI_API_KEY - OpenAI API Key (Example: my-openai-api-key) +## TEMPERATURE - Sets temperature in OpenAI (Default: 0) +## USE_AZURE - Use Azure OpenAI or not (Default: False) OPENAI_API_KEY=your-openai-api-key -TEMPERATURE=0 -USE_AZURE=False +# TEMPERATURE=0 +# USE_AZURE=False ### AZURE -# cleanup azure env as already moved to `azure.yaml.template` +# moved to `azure.yaml.template` ################################################################################ ### LLM MODELS ################################################################################ -# SMART_LLM_MODEL - Smart language model (Default: gpt-4) -# FAST_LLM_MODEL - Fast language model (Default: gpt-3.5-turbo) -SMART_LLM_MODEL=gpt-4 -FAST_LLM_MODEL=gpt-3.5-turbo +## SMART_LLM_MODEL - Smart language model (Default: gpt-4) +## FAST_LLM_MODEL - Fast language model (Default: gpt-3.5-turbo) +# SMART_LLM_MODEL=gpt-4 +# FAST_LLM_MODEL=gpt-3.5-turbo ### LLM MODEL SETTINGS -# FAST_TOKEN_LIMIT - Fast token limit for OpenAI (Default: 4000) -# SMART_TOKEN_LIMIT - Smart token limit for OpenAI (Default: 8000) -# When using --gpt3only this needs to be set to 4000. -FAST_TOKEN_LIMIT=4000 -SMART_TOKEN_LIMIT=8000 +## FAST_TOKEN_LIMIT - Fast token limit for OpenAI (Default: 4000) +## SMART_TOKEN_LIMIT - Smart token limit for OpenAI (Default: 8000) +## When using --gpt3only this needs to be set to 4000. 
+# FAST_TOKEN_LIMIT=4000
+# SMART_TOKEN_LIMIT=8000
################################################################################
### MEMORY
################################################################################
### MEMORY_BACKEND - Memory backend type
-# local - Default
-# pinecone - Pinecone (if configured)
-# redis - Redis (if configured)
-# milvus - Milvus (if configured)
-MEMORY_BACKEND=local
+## local - Default
+## pinecone - Pinecone (if configured)
+## redis - Redis (if configured)
+## milvus - Milvus (if configured)
+## MEMORY_INDEX - Name of index created in Memory backend (Default: auto-gpt)
+# MEMORY_BACKEND=local
+# MEMORY_INDEX=auto-gpt
### PINECONE
-# PINECONE_API_KEY - Pinecone API Key (Example: my-pinecone-api-key)
-# PINECONE_ENV - Pinecone environment (region) (Example: us-west-2)
-PINECONE_API_KEY=your-pinecone-api-key
-PINECONE_ENV=your-pinecone-region
+## PINECONE_API_KEY - Pinecone API Key (Example: my-pinecone-api-key)
+## PINECONE_ENV - Pinecone environment (region) (Example: us-west-2)
+# PINECONE_API_KEY=your-pinecone-api-key
+# PINECONE_ENV=your-pinecone-region
### REDIS
-# REDIS_HOST - Redis host (Default: localhost, use "redis" for docker-compose)
-# REDIS_PORT - Redis port (Default: 6379)
-# REDIS_PASSWORD - Redis password (Default: "")
-# WIPE_REDIS_ON_START - Wipes data / index on start (Default: False)
-# MEMORY_INDEX - Name of index created in Redis database (Default: auto-gpt)
-REDIS_HOST=localhost
-REDIS_PORT=6379
-REDIS_PASSWORD=
-WIPE_REDIS_ON_START=False
-MEMORY_INDEX=auto-gpt
+## REDIS_HOST - Redis host (Default: localhost, use "redis" for docker-compose)
+## REDIS_PORT - Redis port (Default: 6379)
+## REDIS_PASSWORD - Redis password (Default: "")
+## WIPE_REDIS_ON_START - Wipes data / index on start (Default: False)
+# REDIS_HOST=localhost
+# REDIS_PORT=6379
+# REDIS_PASSWORD=
+# WIPE_REDIS_ON_START=False
### WEAVIATE
-# MEMORY_BACKEND - Use 'weaviate' to use Weaviate vector storage
-# WEAVIATE_HOST - Weaviate host IP
-# WEAVIATE_PORT - Weaviate host port
-# WEAVIATE_PROTOCOL - Weaviate host protocol (e.g. 'http')
-# USE_WEAVIATE_EMBEDDED - Whether to use Embedded Weaviate
-# WEAVIATE_EMBEDDED_PATH - File system path were to persist data when running Embedded Weaviate
-# WEAVIATE_USERNAME - Weaviate username
-# WEAVIATE_PASSWORD - Weaviate password
-# WEAVIATE_API_KEY - Weaviate API key if using API-key-based authentication
-# MEMORY_INDEX - Name of index to create in Weaviate
-WEAVIATE_HOST="127.0.0.1"
-WEAVIATE_PORT=8080
-WEAVIATE_PROTOCOL="http"
-USE_WEAVIATE_EMBEDDED=False
-WEAVIATE_EMBEDDED_PATH="/home/me/.local/share/weaviate"
-WEAVIATE_USERNAME=
-WEAVIATE_PASSWORD=
-WEAVIATE_API_KEY=
-MEMORY_INDEX=AutoGpt
+## MEMORY_BACKEND - Use 'weaviate' to use Weaviate vector storage
+## WEAVIATE_HOST - Weaviate host IP
+## WEAVIATE_PORT - Weaviate host port
+## WEAVIATE_PROTOCOL - Weaviate host protocol (e.g. 'http')
+## USE_WEAVIATE_EMBEDDED - Whether to use Embedded Weaviate
+## WEAVIATE_EMBEDDED_PATH - File system path were to persist data when running Embedded Weaviate
+## WEAVIATE_USERNAME - Weaviate username
+## WEAVIATE_PASSWORD - Weaviate password
+## WEAVIATE_API_KEY - Weaviate API key if using API-key-based authentication
+# WEAVIATE_HOST="127.0.0.1"
+# WEAVIATE_PORT=8080
+# WEAVIATE_PROTOCOL="http"
+# USE_WEAVIATE_EMBEDDED=False
+# WEAVIATE_EMBEDDED_PATH="/home/me/.local/share/weaviate"
+# WEAVIATE_USERNAME=
+# WEAVIATE_PASSWORD=
+# WEAVIATE_API_KEY=
### MILVUS
-# MILVUS_ADDR - Milvus remote address (e.g. localhost:19530)
-# MILVUS_COLLECTION - Milvus collection,
-# change it if you want to start a new memory and retain the old memory.
-MILVUS_ADDR=your-milvus-cluster-host-port
-MILVUS_COLLECTION=autogpt
+## MILVUS_ADDR - Milvus remote address (e.g. localhost:19530)
+## MILVUS_COLLECTION - Milvus collection,
+## change it if you want to start a new memory and retain the old memory.
+# MILVUS_ADDR=your-milvus-cluster-host-port
+# MILVUS_COLLECTION=autogpt
################################################################################
### IMAGE GENERATION PROVIDER
################################################################################
### OPEN AI
-# IMAGE_PROVIDER - Image provider (Example: dalle)
-IMAGE_PROVIDER=dalle
+## IMAGE_PROVIDER - Image provider (Example: dalle)
+# IMAGE_PROVIDER=dalle
### HUGGINGFACE
-# STABLE DIFFUSION
-# (Default URL: https://api-inference.huggingface.co/models/CompVis/stable-diffusion-v1-4)
-# Set in image_gen.py)
-# HUGGINGFACE_API_TOKEN - HuggingFace API token (Example: my-huggingface-api-token)
-HUGGINGFACE_API_TOKEN=your-huggingface-api-token
+## STABLE DIFFUSION
+## (Default URL: https://api-inference.huggingface.co/models/CompVis/stable-diffusion-v1-4)
+## Set in image_gen.py)
+## HUGGINGFACE_API_TOKEN - HuggingFace API token (Example: my-huggingface-api-token)
+# HUGGINGFACE_API_TOKEN=your-huggingface-api-token
################################################################################
### AUDIO TO TEXT PROVIDER
################################################################################
### HUGGINGFACE
-HUGGINGFACE_AUDIO_TO_TEXT_MODEL=facebook/wav2vec2-base-960h
+# HUGGINGFACE_AUDIO_TO_TEXT_MODEL=facebook/wav2vec2-base-960h
################################################################################
### GIT Provider for repository actions
################################################################################
### GITHUB
-# GITHUB_API_KEY - Github API key / PAT (Example: github_pat_123)
-# GITHUB_USERNAME - Github username
-GITHUB_API_KEY=github_pat_123
-GITHUB_USERNAME=your-github-username
+## GITHUB_API_KEY - Github API key / PAT (Example: github_pat_123)
+## GITHUB_USERNAME - Github username
+# GITHUB_API_KEY=github_pat_123
+# GITHUB_USERNAME=your-github-username
################################################################################
### SEARCH PROVIDER
################################################################################
### GOOGLE
-# GOOGLE_API_KEY - Google API key (Example: my-google-api-key)
-# CUSTOM_SEARCH_ENGINE_ID - Custom search engine ID (Example: my-custom-search-engine-id)
-GOOGLE_API_KEY=your-google-api-key
-CUSTOM_SEARCH_ENGINE_ID=your-custom-search-engine-id
+## GOOGLE_API_KEY - Google API key (Example: my-google-api-key)
+## CUSTOM_SEARCH_ENGINE_ID - Custom search engine ID (Example: my-custom-search-engine-id)
+# GOOGLE_API_KEY=your-google-api-key
+# CUSTOM_SEARCH_ENGINE_ID=your-custom-search-engine-id
################################################################################
### TTS PROVIDER
################################################################################
### MAC OS
-# USE_MAC_OS_TTS - Use Mac OS TTS or not (Default: False)
-USE_MAC_OS_TTS=False
+## USE_MAC_OS_TTS - Use Mac OS TTS or not (Default: False)
+# USE_MAC_OS_TTS=False
### STREAMELEMENTS
-# USE_BRIAN_TTS - Use Brian TTS or not (Default: False)
-USE_BRIAN_TTS=False
+## USE_BRIAN_TTS - Use Brian TTS or not (Default: False)
+# USE_BRIAN_TTS=False
### ELEVENLABS
-# ELEVENLABS_API_KEY - Eleven Labs API key (Example: my-elevenlabs-api-key)
-# ELEVENLABS_VOICE_1_ID - Eleven Labs voice 1 ID (Example: my-voice-id-1)
-# ELEVENLABS_VOICE_2_ID - Eleven Labs voice 2 ID (Example: my-voice-id-2)
-ELEVENLABS_API_KEY=your-elevenlabs-api-key
-ELEVENLABS_VOICE_1_ID=your-voice-id-1
-ELEVENLABS_VOICE_2_ID=your-voice-id-2
+## ELEVENLABS_API_KEY - Eleven Labs API key (Example: my-elevenlabs-api-key)
+## ELEVENLABS_VOICE_1_ID - Eleven Labs voice 1 ID (Example: my-voice-id-1)
+## ELEVENLABS_VOICE_2_ID - Eleven Labs voice 2 ID (Example: my-voice-id-2)
+# ELEVENLABS_API_KEY=your-elevenlabs-api-key
+# ELEVENLABS_VOICE_1_ID=your-voice-id-1
+# ELEVENLABS_VOICE_2_ID=your-voice-id-2
################################################################################
-### TWITTER API
################################################################################
-TW_CONSUMER_KEY=
-TW_CONSUMER_SECRET=
-TW_ACCESS_TOKEN=
-TW_ACCESS_TOKEN_SECRET=
+# TW_CONSUMER_KEY=
+# TW_CONSUMER_SECRET=
+# TW_ACCESS_TOKEN=
+# TW_ACCESS_TOKEN_SECRET=
diff --git a/autogpt/config/config.py b/autogpt/config/config.py
index bc75b0319..89ddcd33c 100644
--- a/autogpt/config/config.py
+++ b/autogpt/config/config.py
@@ -34,7 +34,7 @@ class Config(metaclass=Singleton):
        self.browse_chunk_max_length = int(os.getenv("BROWSE_CHUNK_MAX_LENGTH", 8192))
        self.openai_api_key = os.getenv("OPENAI_API_KEY")
-        self.temperature = float(os.getenv("TEMPERATURE", "1"))
+        self.temperature = float(os.getenv("TEMPERATURE", "0"))
        self.use_azure = os.getenv("USE_AZURE") == "True"
        self.execute_local_commands = (
            os.getenv("EXECUTE_LOCAL_COMMANDS", "False") == "True"
        )
@@ -98,7 +98,7 @@ class Config(metaclass=Singleton):
        self.redis_host = os.getenv("REDIS_HOST", "localhost")
        self.redis_port = os.getenv("REDIS_PORT", "6379")
        self.redis_password = os.getenv("REDIS_PASSWORD", "")
-        self.wipe_redis_on_start = os.getenv("WIPE_REDIS_ON_START", "True") == "True"
+        self.wipe_redis_on_start = os.getenv("WIPE_REDIS_ON_START", "False") == "True"
        self.memory_index = os.getenv("MEMORY_INDEX", "auto-gpt")
        # Note that indexes must be created on db 0 in redis, this is not configurable.

From 20bd2de54a480bee59fb933195eb75a57fda2bec Mon Sep 17 00:00:00 2001
From: Reinier van der Leer
Date: Wed, 19 Apr 2023 18:19:39 +0200
Subject: [PATCH 2/3] Add headless browser setting
---
 .env.template | 2 ++
 1 file changed, 2 insertions(+)
diff --git a/.env.template b/.env.template
index d23019fff..0e50fcaf8 100644
--- a/.env.template
+++ b/.env.template
@@ -147,8 +147,10 @@ OPENAI_API_KEY=your-openai-api-key
################################################################################
### BROWSER
+## HEADLESS_BROWSER - Whether to run the browser in headless mode (default: True)
## USE_WEB_BROWSER - Sets the web-browser driver to use with selenium (default: chrome).
## Note: set this to either 'chrome', 'firefox', or 'safari' depending on your current browser +# HEADLESS_BROWSER=True # USE_WEB_BROWSER=chrome ### GOOGLE From e08b4d601f0cb74d10a38df0eb7a7e234285d284 Mon Sep 17 00:00:00 2001 From: Reinier van der Leer Date: Wed, 19 Apr 2023 18:37:05 +0200 Subject: [PATCH 3/3] Set WIPE_REDIS_ON_START default True --- .env.template | 4 ++-- autogpt/config/config.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.env.template b/.env.template index 0e50fcaf8..584869046 100644 --- a/.env.template +++ b/.env.template @@ -70,11 +70,11 @@ OPENAI_API_KEY=your-openai-api-key ## REDIS_HOST - Redis host (Default: localhost, use "redis" for docker-compose) ## REDIS_PORT - Redis port (Default: 6379) ## REDIS_PASSWORD - Redis password (Default: "") -## WIPE_REDIS_ON_START - Wipes data / index on start (Default: False) +## WIPE_REDIS_ON_START - Wipes data / index on start (Default: True) # REDIS_HOST=localhost # REDIS_PORT=6379 # REDIS_PASSWORD= -# WIPE_REDIS_ON_START=False +# WIPE_REDIS_ON_START=True ### WEAVIATE ## MEMORY_BACKEND - Use 'weaviate' to use Weaviate vector storage diff --git a/autogpt/config/config.py b/autogpt/config/config.py index b1241d546..0c4576da2 100644 --- a/autogpt/config/config.py +++ b/autogpt/config/config.py @@ -112,7 +112,7 @@ class Config(metaclass=Singleton): self.redis_host = os.getenv("REDIS_HOST", "localhost") self.redis_port = os.getenv("REDIS_PORT", "6379") self.redis_password = os.getenv("REDIS_PASSWORD", "") - self.wipe_redis_on_start = os.getenv("WIPE_REDIS_ON_START", "False") == "True" + self.wipe_redis_on_start = os.getenv("WIPE_REDIS_ON_START", "True") == "True" self.memory_index = os.getenv("MEMORY_INDEX", "auto-gpt") # Note that indexes must be created on db 0 in redis, this is not configurable.
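A quick way to check the effect of this consolidation: once an entry is commented out in `.env.template`, python-dotenv (which Auto-GPT uses to load `.env`) sets nothing for it, so the `os.getenv()` fallbacks in `autogpt/config/config.py` become the single source of defaults. The snippet below is an illustrative sketch only, not part of the patches; it assumes python-dotenv is installed and mirrors the two defaults touched by this series (`TEMPERATURE` in PATCH 1/3, `WIPE_REDIS_ON_START` in PATCH 3/3).

```python
# Illustrative sketch, not part of the patch series.
# Assumes python-dotenv is installed and a .env file based on .env.template.
import os

from dotenv import load_dotenv

load_dotenv()  # lines starting with "#" in .env are ignored, so commented-out vars stay unset

# Same fallback pattern as config.py: the second argument to os.getenv() applies
# only when the variable is absent from the environment.
temperature = float(os.getenv("TEMPERATURE", "0"))                        # 0.0 unless overridden
wipe_redis_on_start = os.getenv("WIPE_REDIS_ON_START", "True") == "True"  # True unless overridden

print(f"TEMPERATURE={temperature}, WIPE_REDIS_ON_START={wipe_redis_on_start}")
```

Note that the `== "True"` comparison in config.py is case-sensitive, so an override like `WIPE_REDIS_ON_START=true` in `.env` evaluates to `False`; overrides need the exact `True`/`False` strings documented in the template.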