################################################################################
### AUTO-GPT - GENERAL SETTINGS
################################################################################

## EXECUTE_LOCAL_COMMANDS - Allow local command execution (Default: False)
## RESTRICT_TO_WORKSPACE - Restrict file operations to workspace ./auto_gpt_workspace (Default: True)
# EXECUTE_LOCAL_COMMANDS=False
# RESTRICT_TO_WORKSPACE=True

## USER_AGENT - Define the user-agent used by the requests library to browse websites (string)
# USER_AGENT="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"

## AI_SETTINGS_FILE - Specifies which AI Settings file to use (defaults to ai_settings.yaml)
# AI_SETTINGS_FILE=ai_settings.yaml

################################################################################
### LLM PROVIDER
################################################################################

### OPENAI
## OPENAI_API_KEY - OpenAI API Key (Example: my-openai-api-key)
## TEMPERATURE - Sets temperature in OpenAI (Default: 0)
## USE_AZURE - Use Azure OpenAI or not (Default: False)
OPENAI_API_KEY=your-openai-api-key
# TEMPERATURE=0
# USE_AZURE=False

### AZURE
# moved to `azure.yaml.template`

################################################################################
### LLM MODELS
################################################################################

## SMART_LLM_MODEL - Smart language model (Default: gpt-4)
## FAST_LLM_MODEL - Fast language model (Default: gpt-3.5-turbo)
# SMART_LLM_MODEL=gpt-4
# FAST_LLM_MODEL=gpt-3.5-turbo

### LLM MODEL SETTINGS
## FAST_TOKEN_LIMIT - Fast token limit for OpenAI (Default: 4000)
## SMART_TOKEN_LIMIT - Smart token limit for OpenAI (Default: 8000)
## When using --gpt3only this needs to be set to 4000.
# FAST_TOKEN_LIMIT=4000
# SMART_TOKEN_LIMIT=8000

################################################################################
### MEMORY
################################################################################

### MEMORY_BACKEND - Memory backend type
## local - Default
## pinecone - Pinecone (if configured)
## redis - Redis (if configured)
## milvus - Milvus (if configured)
## MEMORY_INDEX - Name of index created in Memory backend (Default: auto-gpt)
# MEMORY_BACKEND=local
# MEMORY_INDEX=auto-gpt

### PINECONE
## PINECONE_API_KEY - Pinecone API Key (Example: my-pinecone-api-key)
## PINECONE_ENV - Pinecone environment (region) (Example: us-west-2)
# PINECONE_API_KEY=your-pinecone-api-key
# PINECONE_ENV=your-pinecone-region

### REDIS
## REDIS_HOST - Redis host (Default: localhost, use "redis" for docker-compose)
## REDIS_PORT - Redis port (Default: 6379)
## REDIS_PASSWORD - Redis password (Default: "")
## WIPE_REDIS_ON_START - Wipes data / index on start (Default: True)
# REDIS_HOST=localhost
# REDIS_PORT=6379
# REDIS_PASSWORD=
# WIPE_REDIS_ON_START=True
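## For example, to run with Redis as the memory backend in the docker-compose setup,
## a combination like the following might be used (illustrative values, adjust to your
## deployment; setting WIPE_REDIS_ON_START=False keeps the stored index between runs):
# MEMORY_BACKEND=redis
# REDIS_HOST=redis
# REDIS_PORT=6379
# WIPE_REDIS_ON_START=False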
### WEAVIATE
## MEMORY_BACKEND - Use 'weaviate' to use Weaviate vector storage
## WEAVIATE_HOST - Weaviate host IP
## WEAVIATE_PORT - Weaviate host port
## WEAVIATE_PROTOCOL - Weaviate host protocol (e.g. 'http')
## USE_WEAVIATE_EMBEDDED - Whether to use Embedded Weaviate
## WEAVIATE_EMBEDDED_PATH - File system path where to persist data when running Embedded Weaviate
## WEAVIATE_USERNAME - Weaviate username
## WEAVIATE_PASSWORD - Weaviate password
## WEAVIATE_API_KEY - Weaviate API key if using API-key-based authentication
# WEAVIATE_HOST="127.0.0.1"
# WEAVIATE_PORT=8080
# WEAVIATE_PROTOCOL="http"
# USE_WEAVIATE_EMBEDDED=False
# WEAVIATE_EMBEDDED_PATH="/home/me/.local/share/weaviate"
# WEAVIATE_USERNAME=
# WEAVIATE_PASSWORD=
# WEAVIATE_API_KEY=

### MILVUS
## MILVUS_ADDR - Milvus remote address (e.g. localhost:19530)
## MILVUS_COLLECTION - Milvus collection;
## change it if you want to start a new memory and retain the old memory.
# MILVUS_ADDR=your-milvus-cluster-host-port
# MILVUS_COLLECTION=autogpt

################################################################################
### IMAGE GENERATION PROVIDER
################################################################################

### OPEN AI
## IMAGE_PROVIDER - Image provider (Example: dalle)
## IMAGE_SIZE - Image size (Example: 256)
## DALLE: 256, 512, 1024
# IMAGE_PROVIDER=dalle
# IMAGE_SIZE=256

### HUGGINGFACE
## HUGGINGFACE_IMAGE_MODEL - Text-to-image model from Huggingface (Default: CompVis/stable-diffusion-v1-4)
## HUGGINGFACE_API_TOKEN - HuggingFace API token (Example: my-huggingface-api-token)
# HUGGINGFACE_IMAGE_MODEL=CompVis/stable-diffusion-v1-4
# HUGGINGFACE_API_TOKEN=your-huggingface-api-token

### STABLE DIFFUSION WEBUI
## SD_WEBUI_AUTH - Stable diffusion webui username:password pair (Example: username:password)
## SD_WEBUI_URL - Stable diffusion webui API URL (Example: http://127.0.0.1:7860)
# SD_WEBUI_AUTH=
# SD_WEBUI_URL=http://127.0.0.1:7860

################################################################################
### AUDIO TO TEXT PROVIDER
################################################################################

### HUGGINGFACE
# HUGGINGFACE_AUDIO_TO_TEXT_MODEL=facebook/wav2vec2-base-960h

################################################################################
### GIT Provider for repository actions
################################################################################

### GITHUB
## GITHUB_API_KEY - Github API key / PAT (Example: github_pat_123)
## GITHUB_USERNAME - Github username
# GITHUB_API_KEY=github_pat_123
# GITHUB_USERNAME=your-github-username

################################################################################
### WEB BROWSING
################################################################################

### BROWSER
## HEADLESS_BROWSER - Whether to run the browser in headless mode (default: True)
## USE_WEB_BROWSER - Sets the web-browser driver to use with selenium (default: chrome).
## Note: set this to either 'chrome', 'firefox', or 'safari' depending on your current browser
# HEADLESS_BROWSER=True
# USE_WEB_BROWSER=chrome

## BROWSE_CHUNK_MAX_LENGTH - When browsing websites, define the length of chunks to summarize
## (in number of tokens, excluding the response; 75% of FAST_TOKEN_LIMIT is usually wise)
# BROWSE_CHUNK_MAX_LENGTH=3000
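## For example, with the default FAST_TOKEN_LIMIT of 4000, 75% works out to 3000 tokens,
## which matches the suggested value above.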
## BROWSE_SPACY_LANGUAGE_MODEL is used to split sentences. Install additional languages
## via pip, and set the model name here. Example for Chinese: python -m spacy download zh_core_web_sm
# BROWSE_SPACY_LANGUAGE_MODEL=en_core_web_sm

### GOOGLE
## GOOGLE_API_KEY - Google API key (Example: my-google-api-key)
## CUSTOM_SEARCH_ENGINE_ID - Custom search engine ID (Example: my-custom-search-engine-id)
# GOOGLE_API_KEY=your-google-api-key
# CUSTOM_SEARCH_ENGINE_ID=your-custom-search-engine-id

################################################################################
### TTS PROVIDER
################################################################################

### MAC OS
## USE_MAC_OS_TTS - Use Mac OS TTS or not (Default: False)
# USE_MAC_OS_TTS=False

### STREAMELEMENTS
## USE_BRIAN_TTS - Use Brian TTS or not (Default: False)
# USE_BRIAN_TTS=False

### ELEVENLABS
## ELEVENLABS_API_KEY - Eleven Labs API key (Example: my-elevenlabs-api-key)
## ELEVENLABS_VOICE_1_ID - Eleven Labs voice 1 ID (Example: my-voice-id-1)
## ELEVENLABS_VOICE_2_ID - Eleven Labs voice 2 ID (Example: my-voice-id-2)
# ELEVENLABS_API_KEY=your-elevenlabs-api-key
# ELEVENLABS_VOICE_1_ID=your-voice-id-1
# ELEVENLABS_VOICE_2_ID=your-voice-id-2

################################################################################
### TWITTER API
################################################################################

# TW_CONSUMER_KEY=
# TW_CONSUMER_SECRET=
# TW_ACCESS_TOKEN=
# TW_ACCESS_TOKEN_SECRET=

################################################################################
### ALLOWLISTED PLUGINS
################################################################################

## ALLOWLISTED_PLUGINS - Sets which plugins are allowed to be loaded (Example: plugin1,plugin2,plugin3)
ALLOWLISTED_PLUGINS=