diff --git a/.gitignore b/.gitignore
index eda7f3273..2220ef6e3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,6 +3,7 @@ autogpt/keys.py
autogpt/*json
autogpt/node_modules/
autogpt/__pycache__/keys.cpython-310.pyc
+autogpt/auto_gpt_workspace
package-lock.json
*.pyc
auto_gpt_workspace/*
diff --git a/README.md b/README.md
index f60aa9ffb..71957748f 100644
--- a/README.md
+++ b/README.md
@@ -1,21 +1,28 @@
# Auto-GPT: An Autonomous GPT-4 Experiment
+[CI](https://github.com/Significant-Gravitas/Auto-GPT/actions/workflows/ci.yml)
+[Discord](https://discord.gg/autogpt)
+[GitHub Stars](https://github.com/Significant-Gravitas/Auto-GPT/stargazers)
+[Twitter](https://twitter.com/SigGravitas)
+
+## 💡 Get help - [Q&A](https://github.com/Significant-Gravitas/Auto-GPT/discussions/categories/q-a) or [Discord 💬](https://discord.gg/autogpt)
+
+
+
### 🔴 🔴 🔴 Urgent: USE `stable` not `master` 🔴 🔴 🔴
-> This information takes precedence over any other information provided and should be followed accordingly.
+**Download the latest `stable` release from here: https://github.com/Significant-Gravitas/Auto-GPT/releases/latest.**
+The `master` branch may often be in a **broken** state.
-Our workflow has been improved, but please note that `master` branch may often be in a **broken** state.
-Please download the latest `stable` release from here: https://github.com/Torantulino/Auto-GPT/releases/latest.
+
-
-[Twitter](https://twitter.com/SigGravitas)
-[Discord](https://discord.gg/autogpt)
-[CI](https://github.com/Significant-Gravitas/Auto-GPT/actions/workflows/ci.yml)
Auto-GPT is an experimental open-source application showcasing the capabilities of the GPT-4 language model. This program, driven by GPT-4, chains together LLM "thoughts", to autonomously achieve whatever goal you set. As one of the first examples of GPT-4 running fully autonomously, Auto-GPT pushes the boundaries of what is possible with AI.
-### Demo (30/03/2023):
+### Demo April 16th 2023
-https://user-images.githubusercontent.com/22963551/228855501-2f5777cf-755b-4407-a643-c7299e5b6419.mp4
+https://user-images.githubusercontent.com/70048414/232352935-55c6bf7c-3958-406e-8610-0913475a0b05.mp4
+
+Demo made by Blake Werlinger
💖 Help Fund Auto-GPT's Development 💖
@@ -37,42 +44,6 @@ Development of this free, open-source project is made possible by all the
-
-
-## Table of Contents
-
-- [Auto-GPT: An Autonomous GPT-4 Experiment](#auto-gpt-an-autonomous-gpt-4-experiment)
-  - [🔴 🔴 🔴 Urgent: USE `stable` not `master` 🔴 🔴 🔴](#----urgent-use-stable-not-master----)
-  - [Demo (30/03/2023):](#demo-30032023)
-  - [Table of Contents](#table-of-contents)
-  - [🚀 Features](#-features)
-  - [📋 Requirements](#-requirements)
-  - [💾 Installation](#-installation)
-  - [🔧 Usage](#-usage)
-    - [Logs](#logs)
-    - [Docker](#docker)
-    - [Command Line Arguments](#command-line-arguments)
-  - [🗣️ Speech Mode](#️-speech-mode)
-  - [🔍 Google API Keys Configuration](#-google-api-keys-configuration)
-    - [Setting up environment variables](#setting-up-environment-variables)
-  - [Memory Backend Setup](#memory-backend-setup)
-    - [Redis Setup](#redis-setup)
-    - [🌲 Pinecone API Key Setup](#-pinecone-api-key-setup)
-    - [Milvus Setup](#milvus-setup)
-    - [Weaviate Setup](#weaviate-setup)
-      - [Setting up environment variables](#setting-up-environment-variables-1)
-  - [Setting Your Cache Type](#setting-your-cache-type)
-  - [View Memory Usage](#view-memory-usage)
-  - [🧠 Memory pre-seeding](#-memory-pre-seeding)
-  - [💀 Continuous Mode ⚠️](#-continuous-mode-️)
-    - [GPT3.5 ONLY Mode](#gpt35-only-mode)
-  - [🖼 Image Generation](#-image-generation)
-  - [⚠️ Limitations](#️-limitations)
-  - [🛡 Disclaimer](#-disclaimer)
-  - [🐦 Connect with Us on Twitter](#-connect-with-us-on-twitter)
-  - [Run tests](#run-tests)
-  - [Run linter](#run-linter)
-
## 🚀 Features
- 🌐 Internet access for searches and information gathering
@@ -83,16 +54,17 @@
- azure_model_map:
-    fast_llm_model_deployment_id: "<deployment-id>"
- ...
- ```
- - Details can be found here: https://pypi.org/project/openai/ in the `Microsoft Azure Endpoints` section and here: https://learn.microsoft.com/en-us/azure/cognitive-services/openai/tutorials/embeddings?tabs=command-line for the embedding model.
+ - See [OpenAI API Keys Configuration](#openai-api-keys-configuration) to obtain your OpenAI API key.
+ - Obtain your ElevenLabs API key from: https://elevenlabs.io. You can view your xi-api-key using the "Profile" tab on the website.
+ - If you want to use GPT on an Azure instance, set `USE_AZURE` to `True` and then follow these steps:
+ - Rename `azure.yaml.template` to `azure.yaml` and provide the relevant `azure_api_base`, `azure_api_version` and all the deployment IDs for the relevant models in the `azure_model_map` section:
+ - `fast_llm_model_deployment_id` - your gpt-3.5-turbo or gpt-4 deployment ID
+ - `smart_llm_model_deployment_id` - your gpt-4 deployment ID
+ - `embedding_model_deployment_id` - your text-embedding-ada-002 v2 deployment ID
+ - Please specify all of these values as double-quoted strings
+ ```yaml
+ # Replace string in angled brackets (<>) with your own ID
+ azure_model_map:
+     fast_llm_model_deployment_id: "<deployment-id>"
+ ...
+ ```
+ - Details can be found here: https://pypi.org/project/openai/ in the `Microsoft Azure Endpoints` section and here: https://learn.microsoft.com/en-us/azure/cognitive-services/openai/tutorials/embeddings?tabs=command-line for the embedding model.
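For readers wiring this up, here is a minimal sketch (not part of Auto-GPT; the helper name is hypothetical and PyYAML is assumed) of how the `azure_model_map` above can be resolved to a deployment ID:

```python
# Hypothetical helper (not part of Auto-GPT) showing how the azure.yaml
# mapping above could be resolved to a deployment ID.
import yaml

def get_azure_deployment_id(model: str, config_path: str = "azure.yaml") -> str:
    with open(config_path, encoding="utf-8") as f:
        config = yaml.safe_load(f)
    # Map a model name to the corresponding key in azure_model_map
    key = {
        "gpt-3.5-turbo": "fast_llm_model_deployment_id",
        "gpt-4": "smart_llm_model_deployment_id",
        "text-embedding-ada-002": "embedding_model_deployment_id",
    }[model]
    return config["azure_model_map"][key]
```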
## 🔧 Usage
1. Run the `autogpt` Python module in your terminal
-```
-python -m autogpt
-```
+ ```
+ python -m autogpt
+ ```
2. After each action, choose from options to authorize command(s),
exit the program, or provide feedback to the AI.
@@ -175,30 +150,40 @@ python -m autogpt --debug
You can also build this into a docker image and run it:
-```
+```bash
docker build -t autogpt .
docker run -it --env-file=./.env -v $PWD/auto_gpt_workspace:/app/auto_gpt_workspace autogpt
```
-You can pass extra arguments, for instance, running with `--gpt3only` and `--continuous` mode:
+Or if you have `docker-compose`:
+```bash
+docker-compose run --build --rm auto-gpt
```
+
+You can pass extra arguments, for instance, running with `--gpt3only` and `--continuous` mode:
+```bash
docker run -it --env-file=./.env -v $PWD/auto_gpt_workspace:/app/auto_gpt_workspace autogpt --gpt3only --continuous
```
+```bash
+docker-compose run --build --rm auto-gpt --gpt3only --continuous
+```
+
### Command Line Arguments
Here are some common arguments you can use when running Auto-GPT:
> Replace anything in angled brackets (<>) with a value you want to specify
+
* View all available command line arguments
-```bash
-python -m autogpt --help
-```
+ ```bash
+ python -m autogpt --help
+ ```
* Run Auto-GPT with a different AI Settings file
-```bash
-python -m autogpt --ai-settings <filename>
-```
+ ```bash
+ python -m autogpt --ai-settings <filename>
+ ```
* Specify a memory backend
-```bash
-python -m autogpt --use-memory <memory-backend>
-```
+ ```bash
+ python -m autogpt --use-memory <memory-backend>
+ ```
> **NOTE**: There are shorthands for some of these flags, for example `-m` for `--use-memory`. Use `python -m autogpt --help` for more information
@@ -285,30 +270,24 @@ To switch to either, change the `MEMORY_BACKEND` env variable to the value that
### Redis Setup
> _**CAUTION**_ \
This is not intended to be publicly accessible and lacks security measures. Therefore, avoid exposing Redis to the internet without a password or, ideally, at all
-1. Install docker desktop
-```bash
-docker run -d --name redis-stack-server -p 6379:6379 redis/redis-stack-server:latest
-```
-> See https://hub.docker.com/r/redis/redis-stack-server for setting a password and additional configuration.
+1. Install Docker (or Docker Desktop on Windows)
+2. Launch Redis container
+ ```bash
+ docker run -d --name redis-stack-server -p 6379:6379 redis/redis-stack-server:latest
+ ```
+ > See https://hub.docker.com/r/redis/redis-stack-server for setting a password and additional configuration.
+3. Set the following settings in `.env`
+ > Replace **PASSWORD** in angled brackets (<>)
+ ```bash
+ MEMORY_BACKEND=redis
+ REDIS_HOST=localhost
+ REDIS_PORT=6379
+ REDIS_PASSWORD=<PASSWORD>
+ ```
-2. Set the following environment variables
-> Replace **PASSWORD** in angled brackets (<>)
-```bash
-MEMORY_BACKEND=redis
-REDIS_HOST=localhost
-REDIS_PORT=6379
-REDIS_PASSWORD=<PASSWORD>
-```
-You can optionally set
-
-```bash
-WIPE_REDIS_ON_START=False
-```
-
-To persist memory stored in Redis
+ You can optionally set `WIPE_REDIS_ON_START=False` to persist memory stored in Redis.
You can specify the memory index for redis using the following:
-
```bash
MEMORY_INDEX=<memory-index>
```
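To sanity-check the settings above before launching Auto-GPT, a small smoke test with the `redis` Python client (assumed installed via `pip install redis`; not part of Auto-GPT) can confirm the container is reachable:

```python
# Hypothetical smoke test for the Redis settings above, using redis-py.
import os
import redis

client = redis.Redis(
    host=os.getenv("REDIS_HOST", "localhost"),
    port=int(os.getenv("REDIS_PORT", "6379")),
    password=os.getenv("REDIS_PASSWORD") or None,
)
print(client.ping())  # True if the container from step 2 is reachable
```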
@@ -353,8 +332,9 @@ export MEMORY_BACKEND="pinecone"
- or set up via [Zilliz Cloud](https://zilliz.com/cloud)
- set `MILVUS_ADDR` in `.env` to your Milvus address `host:port`.
- set `MEMORY_BACKEND` in `.env` to `milvus` to enable Milvus as the backend.
-- optional
- - set `MILVUS_COLLECTION` in `.env` to change milvus collection name as you want, `autogpt` is the default name.
+
+**Optional:**
+- set `MILVUS_COLLECTION` in `.env` to change milvus collection name as you want, `autogpt` is the default name.
### Weaviate Setup
@@ -380,7 +360,7 @@ MEMORY_INDEX="Autogpt" # name of the index to create for the application
## View Memory Usage
-1. View memory usage by using the `--debug` flag :)
+View memory usage by using the `--debug` flag :)
## π§ Memory pre-seeding
@@ -415,7 +395,7 @@ You can adjust the `max_length` and overlap parameters to fine-tune the way the
Memory pre-seeding is a technique for improving AI accuracy by ingesting relevant data into its memory. Chunks of data are split and added to memory, allowing the AI to access them quickly and generate more accurate responses. It's useful for large datasets or when specific information needs to be accessed quickly. Examples include ingesting API or GitHub documentation before running Auto-GPT.
-⚠️ If you use Redis as your memory, make sure to run Auto-GPT with the `WIPE_REDIS_ON_START` set to `False` in your `.env` file.
+⚠️ If you use Redis as your memory, make sure to run Auto-GPT with `WIPE_REDIS_ON_START=False` set in your `.env` file.
⚠️ For other memory backends, we currently forcefully wipe the memory when starting Auto-GPT. To ingest data with those memory backends, you can call the `data_ingestion.py` script at any time during an Auto-GPT run.
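The chunk-and-overlap idea behind pre-seeding can be shown in a short illustrative sketch (the real splitting lives in Auto-GPT's text-processing code; the parameter names here simply mirror the `max_length` and overlap knobs mentioned above):

```python
# Illustrative sketch of chunking with overlap, as used for pre-seeding.
def split_text(text: str, max_length: int = 4000, overlap: int = 200) -> list[str]:
    assert 0 <= overlap < max_length
    chunks = []
    start = 0
    while start < len(text):
        chunks.append(text[start : start + max_length])
        # Step forward by less than max_length so consecutive chunks share context
        start += max_length - overlap
    return chunks
```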
@@ -430,9 +410,9 @@ Use at your own risk.
1. Run the `autogpt` python module in your terminal:
-```bash
-python -m autogpt --speak --continuous
-```
+ ```bash
+ python -m autogpt --speak --continuous
+ ```
2. To exit the program, press Ctrl + C
diff --git a/autogpt/app.py b/autogpt/app.py
index 48db03665..d3a5e57c7 100644
--- a/autogpt/app.py
+++ b/autogpt/app.py
@@ -17,6 +17,7 @@ from autogpt.commands.file_operations import (
    read_file,
    search_files,
    write_to_file,
+    download_file,
)
from autogpt.memory import get_memory
from autogpt.processing.text import summarize_text
@@ -163,6 +164,10 @@ def execute_command(command_name: str, arguments):
            return delete_file(arguments["file"])
        elif command_name == "search_files":
            return search_files(arguments["directory"])
+        elif command_name == "download_file":
+            if not CFG.allow_downloads:
+                return "Error: You do not have user authorization to download files locally."
+            return download_file(arguments["url"], arguments["file"])
        elif command_name == "browse_website":
            return browse_website(arguments["url"], arguments["question"])
        # TODO: Change these to take in a file rather than pasted code, if
# TODO: Change these to take in a file rather than pasted code, if
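For illustration, a hypothetical dispatch call against the new branch; with `allow_downloads` left at its default of `False`, the command is refused:

```python
# Hypothetical invocation of the dispatcher with the new command.
from autogpt.app import execute_command

result = execute_command(
    "download_file",
    {"url": "https://example.com/data.csv", "file": "data.csv"},
)
# With CFG.allow_downloads == False (the default), this prints:
# Error: You do not have user authorization to download files locally.
print(result)
```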
diff --git a/autogpt/args.py b/autogpt/args.py
index eca323347..f0e9c07a3 100644
--- a/autogpt/args.py
+++ b/autogpt/args.py
@@ -1,7 +1,7 @@
"""This module contains the argument parsing logic for the script."""
import argparse
-from colorama import Fore
+from colorama import Fore, Back, Style
from autogpt import utils
from autogpt.config import Config
from autogpt.logs import logger
@@ -63,6 +63,12 @@ def parse_arguments() -> None:
help="Specifies which ai_settings.yaml file to use, will also automatically"
" skip the re-prompt.",
)
+    parser.add_argument(
+        "--allow-downloads",
+        action="store_true",
+        dest="allow_downloads",
+        help="Dangerous: Allows Auto-GPT to download files natively.",
+    )
    args = parser.parse_args()
    if args.debug:
@@ -133,5 +139,13 @@ def parse_arguments() -> None:
        CFG.ai_settings_file = file
        CFG.skip_reprompt = True
+    if args.allow_downloads:
+        logger.typewriter_log("Native Downloading:", Fore.GREEN, "ENABLED")
+        logger.typewriter_log(
+            "WARNING: ",
+            Fore.YELLOW,
+            f"{Back.LIGHTYELLOW_EX}Auto-GPT will now be able to download and save files to your machine.{Back.RESET} "
+            "It is recommended that you monitor any files it downloads carefully.",
+        )
+        logger.typewriter_log(
+            "WARNING: ",
+            Fore.YELLOW,
+            f"{Back.RED + Style.BRIGHT}ALWAYS REMEMBER TO NEVER OPEN FILES YOU AREN'T SURE OF!{Style.RESET_ALL}",
+        )
+        CFG.allow_downloads = True
+
    if args.browser_name:
        CFG.selenium_web_browser = args.browser_name
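As a standalone sketch of the flag wiring above, runnable outside Auto-GPT:

```python
# Minimal sketch of the --allow-downloads flag, independent of Auto-GPT.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    "--allow-downloads",
    action="store_true",
    dest="allow_downloads",
    help="Dangerous: Allows Auto-GPT to download files natively.",
)
args = parser.parse_args(["--allow-downloads"])
assert args.allow_downloads is True  # store_true defaults to False when absent
```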
diff --git a/autogpt/commands/execute_code.py b/autogpt/commands/execute_code.py
index eaafa00a8..2cc797cbb 100644
--- a/autogpt/commands/execute_code.py
+++ b/autogpt/commands/execute_code.py
@@ -40,6 +40,9 @@ def execute_python_file(file: str):
    try:
        client = docker.from_env()
+        # You can replace 'python:3.10' with the desired Python image/version
+        # You can find available Python images on Docker Hub:
+        # https://hub.docker.com/_/python
        image_name = "python:3.10"
        try:
            client.images.get(image_name)
@@ -57,9 +60,6 @@ def execute_python_file(file: str):
            elif status:
                print(status)
-        # You can replace 'python:3.8' with the desired Python image/version
-        # You can find available Python images on Docker Hub:
-        # https://hub.docker.com/_/python
        container = client.containers.run(
            image_name,
            f"python {file}",
diff --git a/autogpt/commands/file_operations.py b/autogpt/commands/file_operations.py
index 8abc2e232..d273c1a34 100644
--- a/autogpt/commands/file_operations.py
+++ b/autogpt/commands/file_operations.py
@@ -4,9 +4,16 @@ from __future__ import annotations
import os
import os.path
from pathlib import Path
from typing import Generator
+import requests
+from requests.adapters import HTTPAdapter, Retry
+from colorama import Fore, Back
+from autogpt.spinner import Spinner
+from autogpt.utils import readable_file_size
from autogpt.workspace import path_in_workspace, WORKSPACE_PATH
+
LOG_FILE = "file_logger.txt"
LOG_FILE_PATH = WORKSPACE_PATH / LOG_FILE
@@ -214,3 +221,43 @@ def search_files(directory: str) -> list[str]:
            found_files.append(relative_path)

    return found_files
+
+
+def download_file(url: str, filename: str) -> str:
+    """Downloads a file
+    Args:
+        url (str): URL of the file to download
+        filename (str): Filename to save the file as
+    """
+    safe_filename = path_in_workspace(filename)
+    try:
+        message = f"{Fore.YELLOW}Downloading file from {Back.LIGHTBLUE_EX}{url}{Back.RESET}{Fore.RESET}"
+        with Spinner(message) as spinner:
+            session = requests.Session()
+            retry = Retry(total=3, backoff_factor=1, status_forcelist=[502, 503, 504])
+            adapter = HTTPAdapter(max_retries=retry)
+            session.mount("http://", adapter)
+            session.mount("https://", adapter)
+
+            total_size = 0
+            downloaded_size = 0
+
+            with session.get(url, allow_redirects=True, stream=True) as r:
+                r.raise_for_status()
+                total_size = int(r.headers.get("Content-Length", 0))
+                downloaded_size = 0
+
+                with open(safe_filename, "wb") as f:
+                    for chunk in r.iter_content(chunk_size=8192):
+                        f.write(chunk)
+                        downloaded_size += len(chunk)
+
+                        # Update the progress message
+                        progress = f"{readable_file_size(downloaded_size)} / {readable_file_size(total_size)}"
+                        spinner.update_message(f"{message} {progress}")
+
+        return f'Successfully downloaded and locally stored file: "{filename}"! (Size: {readable_file_size(total_size)})'
+    except requests.HTTPError as e:
+        return f"Got an HTTP Error whilst trying to download file: {e}"
+    except Exception as e:
+        return "Error: " + str(e)
diff --git a/autogpt/commands/git_operations.py b/autogpt/commands/git_operations.py
index 3ff35cf31..675eb2283 100644
--- a/autogpt/commands/git_operations.py
+++ b/autogpt/commands/git_operations.py
@@ -1,6 +1,7 @@
"""Git operations for autogpt"""
import git
from autogpt.config import Config
+from autogpt.workspace import path_in_workspace
CFG = Config()
@@ -16,8 +17,9 @@ def clone_repository(repo_url: str, clone_path: str) -> str:
        str: The result of the clone operation"""
    split_url = repo_url.split("//")
    auth_repo_url = f"//{CFG.github_username}:{CFG.github_api_key}@".join(split_url)
+    safe_clone_path = path_in_workspace(clone_path)
    try:
-        git.Repo.clone_from(auth_repo_url, clone_path)
-        return f"""Cloned {repo_url} to {clone_path}"""
+        git.Repo.clone_from(auth_repo_url, safe_clone_path)
+        return f"""Cloned {repo_url} to {safe_clone_path}"""
    except Exception as e:
        return f"Error: {str(e)}"
diff --git a/autogpt/config/config.py b/autogpt/config/config.py
index 22da52b04..fe6f4f325 100644
--- a/autogpt/config/config.py
+++ b/autogpt/config/config.py
@@ -24,6 +24,7 @@ class Config(metaclass=Singleton):
        self.continuous_limit = 0
        self.speak_mode = False
        self.skip_reprompt = False
+        self.allow_downloads = False
        self.selenium_web_browser = os.getenv("USE_WEB_BROWSER", "chrome")
        self.ai_settings_file = os.getenv("AI_SETTINGS_FILE", "ai_settings.yaml")
diff --git a/autogpt/memory/local.py b/autogpt/memory/local.py
index 6c7ee1b36..9b911eeff 100644
--- a/autogpt/memory/local.py
+++ b/autogpt/memory/local.py
@@ -54,7 +54,7 @@ class LocalCache(MemoryProviderSingleton):
                self.data = CacheContent()
        else:
            print(
-                f"Warning: The file '{self.filename}' does not exist."
+                f"Warning: The file '{self.filename}' does not exist. "
                "Local memory would not be saved to a file."
            )
            self.data = CacheContent()
diff --git a/autogpt/prompt.py b/autogpt/prompt.py
index 18a5736c1..a2b20b1fe 100644
--- a/autogpt/prompt.py
+++ b/autogpt/prompt.py
@@ -105,6 +105,16 @@ def get_prompt() -> str:
        ),
    )
+    # Only add the download file command if the AI is allowed to execute it
+    if cfg.allow_downloads:
+        commands.append(
+            (
+                "Downloads a file from the internet, and stores it locally",
+                "download_file",
+                {"url": "<url>", "file": "<file>"},
+            ),
+        )
+
    # Add these commands last.
    commands.append(
        ("Do Nothing", "do_nothing", {}),
diff --git a/autogpt/spinner.py b/autogpt/spinner.py
index 56b4f20a6..febcea8eb 100644
--- a/autogpt/spinner.py
+++ b/autogpt/spinner.py
@@ -29,12 +29,14 @@ class Spinner:
            time.sleep(self.delay)
            sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r")
-    def __enter__(self) -> None:
+    def __enter__(self):
        """Start the spinner"""
        self.running = True
        self.spinner_thread = threading.Thread(target=self.spin)
        self.spinner_thread.start()
+        return self
+
    def __exit__(self, exc_type, exc_value, exc_traceback) -> None:
        """Stop the spinner
@@ -48,3 +50,14 @@ class Spinner:
            self.spinner_thread.join()
        sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r")
        sys.stdout.flush()
+
+    def update_message(self, new_message, delay=0.1):
+        """Update the spinner message
+        Args:
+            new_message (str): New message to display
+            delay (float): Delay in seconds before updating the message
+        """
+        time.sleep(delay)
+        sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r")  # Clear the current message
+        sys.stdout.flush()
+        self.message = new_message
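A usage sketch for the context-manager change: since `__enter__` now returns the instance, the spinner can be bound with `as` and updated mid-task (`do_chunk_of_work` is a hypothetical placeholder for real work):

```python
# Usage sketch for the updated Spinner context manager.
from autogpt.spinner import Spinner

def do_chunk_of_work() -> None:  # hypothetical placeholder
    ...

with Spinner("Downloading") as spinner:
    for percent in (25, 50, 75, 100):
        do_chunk_of_work()
        spinner.update_message(f"Downloading {percent}%")
```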
diff --git a/autogpt/utils.py b/autogpt/utils.py
index 59709d02b..11d98d1b7 100644
--- a/autogpt/utils.py
+++ b/autogpt/utils.py
@@ -24,3 +24,16 @@ def validate_yaml_file(file: str):
        )

    return (True, f"Successfully validated {Fore.CYAN}`{file}`{Fore.RESET}!")
+
+
+def readable_file_size(size, decimal_places=2):
+    """Converts the given size in bytes to a readable format.
+    Args:
+        size: Size in bytes
+        decimal_places (int): Number of decimal places to display
+    """
+    for unit in ["B", "KB", "MB", "GB", "TB"]:
+        if size < 1024.0:
+            break
+        size /= 1024.0
+    return f"{size:.{decimal_places}f} {unit}"
diff --git a/requirements-docker.txt b/requirements-docker.txt
index 3a8a344ca..a6018f8f9 100644
--- a/requirements-docker.txt
+++ b/requirements-docker.txt
@@ -24,4 +24,5 @@ pre-commit
black
isort
gitpython==3.1.31
-tweepy
\ No newline at end of file
+tweepy
+jsonschema
\ No newline at end of file
diff --git a/run_continuous.sh b/run_continuous.sh
index 14c9cfd2a..1f4436c88 100755
--- a/run_continuous.sh
+++ b/run_continuous.sh
@@ -1,3 +1,3 @@
#!/bin/bash
-argument="--continuous"
-./run.sh "$argument"
+
+./run.sh --continuous "$@"
diff --git a/tests/milvus_memory_test.py b/tests/milvus_memory_test.py
index e0e2f7fc8..84fd6e6d5 100644
--- a/tests/milvus_memory_test.py
+++ b/tests/milvus_memory_test.py
@@ -26,7 +26,7 @@ try:
    def setUp(self) -> None:
        """Set up the test environment"""
-        self.cfg = MockConfig()
+        self.cfg = mock_config()
        self.memory = MilvusMemory(self.cfg)

    def test_add(self) -> None: