Merge pull request #2448 from Pwuts/fix/default-config

Consolidate default config with config.py as master

commit a2723f16f2

.env.template (218 changes)

@@ -1,187 +1,189 @@
################################################################################
### AUTO-GPT - GENERAL SETTINGS
################################################################################
# EXECUTE_LOCAL_COMMANDS - Allow local command execution (Example: False)
EXECUTE_LOCAL_COMMANDS=False
# RESTRICT_TO_WORKSPACE - Restrict file operations to workspace ./auto_gpt_workspace (Default: True)
RESTRICT_TO_WORKSPACE=True
# BROWSE_CHUNK_MAX_LENGTH - When browsing website, define the length of chunk stored in memory
BROWSE_CHUNK_MAX_LENGTH=8192
# USER_AGENT - Define the user-agent used by the requests library to browse website (string)

## EXECUTE_LOCAL_COMMANDS - Allow local command execution (Default: False)
## RESTRICT_TO_WORKSPACE - Restrict file operations to workspace ./auto_gpt_workspace (Default: True)
# EXECUTE_LOCAL_COMMANDS=False
# RESTRICT_TO_WORKSPACE=True

## BROWSE_CHUNK_MAX_LENGTH - When browsing website, define the length of chunk stored in memory
# BROWSE_CHUNK_MAX_LENGTH=8192

## USER_AGENT - Define the user-agent used by the requests library to browse website (string)
# USER_AGENT="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"
# AI_SETTINGS_FILE - Specifies which AI Settings file to use (defaults to ai_settings.yaml)
AI_SETTINGS_FILE=ai_settings.yaml

## AI_SETTINGS_FILE - Specifies which AI Settings file to use (defaults to ai_settings.yaml)
# AI_SETTINGS_FILE=ai_settings.yaml

################################################################################
### LLM PROVIDER
################################################################################

### OPENAI
# OPENAI_API_KEY - OpenAI API Key (Example: my-openai-api-key)
# TEMPERATURE - Sets temperature in OpenAI (Default: 0)
# USE_AZURE - Use Azure OpenAI or not (Default: False)
## OPENAI_API_KEY - OpenAI API Key (Example: my-openai-api-key)
## TEMPERATURE - Sets temperature in OpenAI (Default: 0)
## USE_AZURE - Use Azure OpenAI or not (Default: False)
OPENAI_API_KEY=your-openai-api-key
TEMPERATURE=0
USE_AZURE=False
# TEMPERATURE=0
# USE_AZURE=False

### AZURE
# cleanup azure env as already moved to `azure.yaml.template`
# moved to `azure.yaml.template`

################################################################################
### LLM MODELS
################################################################################

# SMART_LLM_MODEL - Smart language model (Default: gpt-4)
# FAST_LLM_MODEL - Fast language model (Default: gpt-3.5-turbo)
SMART_LLM_MODEL=gpt-4
FAST_LLM_MODEL=gpt-3.5-turbo
## SMART_LLM_MODEL - Smart language model (Default: gpt-4)
## FAST_LLM_MODEL - Fast language model (Default: gpt-3.5-turbo)
# SMART_LLM_MODEL=gpt-4
# FAST_LLM_MODEL=gpt-3.5-turbo

### LLM MODEL SETTINGS
# FAST_TOKEN_LIMIT - Fast token limit for OpenAI (Default: 4000)
# SMART_TOKEN_LIMIT - Smart token limit for OpenAI (Default: 8000)
# When using --gpt3only this needs to be set to 4000.
FAST_TOKEN_LIMIT=4000
SMART_TOKEN_LIMIT=8000
## FAST_TOKEN_LIMIT - Fast token limit for OpenAI (Default: 4000)
## SMART_TOKEN_LIMIT - Smart token limit for OpenAI (Default: 8000)
## When using --gpt3only this needs to be set to 4000.
# FAST_TOKEN_LIMIT=4000
# SMART_TOKEN_LIMIT=8000

################################################################################
### MEMORY
################################################################################

### MEMORY_BACKEND - Memory backend type
# local - Default
# pinecone - Pinecone (if configured)
# redis - Redis (if configured)
# milvus - Milvus (if configured)
MEMORY_BACKEND=local
## local - Default
## pinecone - Pinecone (if configured)
## redis - Redis (if configured)
## milvus - Milvus (if configured)
## MEMORY_INDEX - Name of index created in Memory backend (Default: auto-gpt)
# MEMORY_BACKEND=local
# MEMORY_INDEX=auto-gpt

### PINECONE
# PINECONE_API_KEY - Pinecone API Key (Example: my-pinecone-api-key)
# PINECONE_ENV - Pinecone environment (region) (Example: us-west-2)
PINECONE_API_KEY=your-pinecone-api-key
PINECONE_ENV=your-pinecone-region
## PINECONE_API_KEY - Pinecone API Key (Example: my-pinecone-api-key)
## PINECONE_ENV - Pinecone environment (region) (Example: us-west-2)
# PINECONE_API_KEY=your-pinecone-api-key
# PINECONE_ENV=your-pinecone-region

### REDIS
# REDIS_HOST - Redis host (Default: localhost, use "redis" for docker-compose)
# REDIS_PORT - Redis port (Default: 6379)
# REDIS_PASSWORD - Redis password (Default: "")
# WIPE_REDIS_ON_START - Wipes data / index on start (Default: False)
# MEMORY_INDEX - Name of index created in Redis database (Default: auto-gpt)
REDIS_HOST=localhost
REDIS_PORT=6379
REDIS_PASSWORD=
WIPE_REDIS_ON_START=False
MEMORY_INDEX=auto-gpt
## REDIS_HOST - Redis host (Default: localhost, use "redis" for docker-compose)
## REDIS_PORT - Redis port (Default: 6379)
## REDIS_PASSWORD - Redis password (Default: "")
## WIPE_REDIS_ON_START - Wipes data / index on start (Default: True)
# REDIS_HOST=localhost
# REDIS_PORT=6379
# REDIS_PASSWORD=
# WIPE_REDIS_ON_START=True

### WEAVIATE
# MEMORY_BACKEND - Use 'weaviate' to use Weaviate vector storage
# WEAVIATE_HOST - Weaviate host IP
# WEAVIATE_PORT - Weaviate host port
# WEAVIATE_PROTOCOL - Weaviate host protocol (e.g. 'http')
# USE_WEAVIATE_EMBEDDED - Whether to use Embedded Weaviate
# WEAVIATE_EMBEDDED_PATH - File system path where to persist data when running Embedded Weaviate
# WEAVIATE_USERNAME - Weaviate username
# WEAVIATE_PASSWORD - Weaviate password
# WEAVIATE_API_KEY - Weaviate API key if using API-key-based authentication
# MEMORY_INDEX - Name of index to create in Weaviate
WEAVIATE_HOST="127.0.0.1"
WEAVIATE_PORT=8080
WEAVIATE_PROTOCOL="http"
USE_WEAVIATE_EMBEDDED=False
WEAVIATE_EMBEDDED_PATH="/home/me/.local/share/weaviate"
WEAVIATE_USERNAME=
WEAVIATE_PASSWORD=
WEAVIATE_API_KEY=
MEMORY_INDEX=AutoGpt
## MEMORY_BACKEND - Use 'weaviate' to use Weaviate vector storage
## WEAVIATE_HOST - Weaviate host IP
## WEAVIATE_PORT - Weaviate host port
## WEAVIATE_PROTOCOL - Weaviate host protocol (e.g. 'http')
## USE_WEAVIATE_EMBEDDED - Whether to use Embedded Weaviate
## WEAVIATE_EMBEDDED_PATH - File system path where to persist data when running Embedded Weaviate
## WEAVIATE_USERNAME - Weaviate username
## WEAVIATE_PASSWORD - Weaviate password
## WEAVIATE_API_KEY - Weaviate API key if using API-key-based authentication
# WEAVIATE_HOST="127.0.0.1"
# WEAVIATE_PORT=8080
# WEAVIATE_PROTOCOL="http"
# USE_WEAVIATE_EMBEDDED=False
# WEAVIATE_EMBEDDED_PATH="/home/me/.local/share/weaviate"
# WEAVIATE_USERNAME=
# WEAVIATE_PASSWORD=
# WEAVIATE_API_KEY=

### MILVUS
# MILVUS_ADDR - Milvus remote address (e.g. localhost:19530)
# MILVUS_COLLECTION - Milvus collection,
# change it if you want to start a new memory and retain the old memory.
MILVUS_ADDR=your-milvus-cluster-host-port
MILVUS_COLLECTION=autogpt
## MILVUS_ADDR - Milvus remote address (e.g. localhost:19530)
## MILVUS_COLLECTION - Milvus collection,
## change it if you want to start a new memory and retain the old memory.
# MILVUS_ADDR=your-milvus-cluster-host-port
# MILVUS_COLLECTION=autogpt

################################################################################
### IMAGE GENERATION PROVIDER
################################################################################

### OPEN AI
# IMAGE_PROVIDER - Image provider (Example: dalle)
IMAGE_PROVIDER=dalle
# IMAGE_SIZE - Image size (Example: 256)
# DALLE: 256, 512, 1024
IMAGE_SIZE=256
## IMAGE_PROVIDER - Image provider (Example: dalle)
## IMAGE_SIZE - Image size (Example: 256)
## DALLE: 256, 512, 1024
# IMAGE_PROVIDER=dalle
# IMAGE_SIZE=256

### HUGGINGFACE
# HUGGINGFACE_IMAGE_MODEL - Text-to-image model from Huggingface (Default: CompVis/stable-diffusion-v1-4)
HUGGINGFACE_IMAGE_MODEL=CompVis/stable-diffusion-v1-4
# HUGGINGFACE_API_TOKEN - HuggingFace API token (Example: my-huggingface-api-token)
HUGGINGFACE_API_TOKEN=your-huggingface-api-token
## HUGGINGFACE_IMAGE_MODEL - Text-to-image model from Huggingface (Default: CompVis/stable-diffusion-v1-4)
## HUGGINGFACE_API_TOKEN - HuggingFace API token (Example: my-huggingface-api-token)
# HUGGINGFACE_IMAGE_MODEL=CompVis/stable-diffusion-v1-4
# HUGGINGFACE_API_TOKEN=your-huggingface-api-token

### STABLE DIFFUSION WEBUI
# SD_WEBUI_URL - Stable diffusion webui API URL (Example: http://127.0.0.1:7860)
SD_WEBUI_URL=http://127.0.0.1:7860
# SD_WEBUI_AUTH - Stable diffusion webui username:password pair (Example: username:password)
SD_WEBUI_AUTH=
## SD_WEBUI_AUTH - Stable diffusion webui username:password pair (Example: username:password)
## SD_WEBUI_URL - Stable diffusion webui API URL (Example: http://127.0.0.1:7860)
# SD_WEBUI_AUTH=
# SD_WEBUI_URL=http://127.0.0.1:7860

################################################################################
### AUDIO TO TEXT PROVIDER
################################################################################

### HUGGINGFACE
HUGGINGFACE_AUDIO_TO_TEXT_MODEL=facebook/wav2vec2-base-960h
# HUGGINGFACE_AUDIO_TO_TEXT_MODEL=facebook/wav2vec2-base-960h

################################################################################
### GIT Provider for repository actions
################################################################################

### GITHUB
# GITHUB_API_KEY - Github API key / PAT (Example: github_pat_123)
# GITHUB_USERNAME - Github username
GITHUB_API_KEY=github_pat_123
GITHUB_USERNAME=your-github-username
## GITHUB_API_KEY - Github API key / PAT (Example: github_pat_123)
## GITHUB_USERNAME - Github username
# GITHUB_API_KEY=github_pat_123
# GITHUB_USERNAME=your-github-username

################################################################################
### WEB BROWSING
################################################################################

### BROWSER
# USE_WEB_BROWSER - Sets the web-browser drivers to use with selenium (defaults to chrome).
# HEADLESS_BROWSER - Whether to run the browser in headless mode (defaults to True)
# Note: set this to either 'chrome', 'firefox', or 'safari' depending on your current browser
# USE_WEB_BROWSER=chrome
## HEADLESS_BROWSER - Whether to run the browser in headless mode (default: True)
## USE_WEB_BROWSER - Sets the web-browser driver to use with selenium (default: chrome).
## Note: set this to either 'chrome', 'firefox', or 'safari' depending on your current browser
# HEADLESS_BROWSER=True
# USE_WEB_BROWSER=chrome

### GOOGLE
# GOOGLE_API_KEY - Google API key (Example: my-google-api-key)
# CUSTOM_SEARCH_ENGINE_ID - Custom search engine ID (Example: my-custom-search-engine-id)
GOOGLE_API_KEY=your-google-api-key
CUSTOM_SEARCH_ENGINE_ID=your-custom-search-engine-id
## GOOGLE_API_KEY - Google API key (Example: my-google-api-key)
## CUSTOM_SEARCH_ENGINE_ID - Custom search engine ID (Example: my-custom-search-engine-id)
# GOOGLE_API_KEY=your-google-api-key
# CUSTOM_SEARCH_ENGINE_ID=your-custom-search-engine-id

################################################################################
### TTS PROVIDER
################################################################################

### MAC OS
# USE_MAC_OS_TTS - Use Mac OS TTS or not (Default: False)
USE_MAC_OS_TTS=False
## USE_MAC_OS_TTS - Use Mac OS TTS or not (Default: False)
# USE_MAC_OS_TTS=False

### STREAMELEMENTS
# USE_BRIAN_TTS - Use Brian TTS or not (Default: False)
USE_BRIAN_TTS=False
## USE_BRIAN_TTS - Use Brian TTS or not (Default: False)
# USE_BRIAN_TTS=False

### ELEVENLABS
# ELEVENLABS_API_KEY - Eleven Labs API key (Example: my-elevenlabs-api-key)
# ELEVENLABS_VOICE_1_ID - Eleven Labs voice 1 ID (Example: my-voice-id-1)
# ELEVENLABS_VOICE_2_ID - Eleven Labs voice 2 ID (Example: my-voice-id-2)
ELEVENLABS_API_KEY=your-elevenlabs-api-key
ELEVENLABS_VOICE_1_ID=your-voice-id-1
ELEVENLABS_VOICE_2_ID=your-voice-id-2
## ELEVENLABS_API_KEY - Eleven Labs API key (Example: my-elevenlabs-api-key)
## ELEVENLABS_VOICE_1_ID - Eleven Labs voice 1 ID (Example: my-voice-id-1)
## ELEVENLABS_VOICE_2_ID - Eleven Labs voice 2 ID (Example: my-voice-id-2)
# ELEVENLABS_API_KEY=your-elevenlabs-api-key
# ELEVENLABS_VOICE_1_ID=your-voice-id-1
# ELEVENLABS_VOICE_2_ID=your-voice-id-2

################################################################################
### TWITTER API
################################################################################

TW_CONSUMER_KEY=
TW_CONSUMER_SECRET=
TW_ACCESS_TOKEN=
TW_ACCESS_TOKEN_SECRET=
# TW_CONSUMER_KEY=
# TW_CONSUMER_SECRET=
# TW_ACCESS_TOKEN=
# TW_ACCESS_TOKEN_SECRET=
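With this change, `.env.template` no longer sets values itself: every entry is commented out, and `config.py` carries the defaults, so a variable only takes effect once a user copies `.env.template` to `.env` and uncomments the lines they want to change. A minimal sketch of that override pattern, assuming `python-dotenv` is installed (variable names and defaults are taken from the template above; this is an illustration, not the project's actual `config.py`):

```python
import os

from dotenv import load_dotenv  # python-dotenv

# Read .env if present; anything left commented out in .env simply falls
# through to the code-side default, which is the point of this PR.
load_dotenv()

execute_local_commands = os.getenv("EXECUTE_LOCAL_COMMANDS", "False") == "True"
restrict_to_workspace = os.getenv("RESTRICT_TO_WORKSPACE", "True") == "True"
browse_chunk_max_length = int(os.getenv("BROWSE_CHUNK_MAX_LENGTH", "8192"))
fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo")
smart_llm_model = os.getenv("SMART_LLM_MODEL", "gpt-4")
memory_backend = os.getenv("MEMORY_BACKEND", "local")

print(memory_backend, fast_llm_model, execute_local_commands)
```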
config.py

@@ -34,7 +34,7 @@ class Config(metaclass=Singleton):
        self.browse_chunk_max_length = int(os.getenv("BROWSE_CHUNK_MAX_LENGTH", 8192))

        self.openai_api_key = os.getenv("OPENAI_API_KEY")
        self.temperature = float(os.getenv("TEMPERATURE", "1"))
        self.temperature = float(os.getenv("TEMPERATURE", "0"))
        self.use_azure = os.getenv("USE_AZURE") == "True"
        self.execute_local_commands = (
            os.getenv("EXECUTE_LOCAL_COMMANDS", "False") == "True"
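The `config.py` side of the hunk changes the code default for `TEMPERATURE` from `"1"` to `"0"`, matching the value the old template used to set explicitly. A quick standalone check of how that fallback behaves (hypothetical snippet, not project code):

```python
import os

def temperature_from_env() -> float:
    # Mirrors the updated line in the diff: unset -> 0.0, set -> parsed float.
    return float(os.getenv("TEMPERATURE", "0"))

os.environ.pop("TEMPERATURE", None)
assert temperature_from_env() == 0.0   # new default when .env leaves it commented out
os.environ["TEMPERATURE"] = "0.7"
assert temperature_from_env() == 0.7   # an explicit .env override still wins
```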