Merge remote-tracking branch 'upstream/master'

# Conflicts:
#	autogpt/app.py
#	autogpt/json_fixes/auto_fix.py
#	autogpt/json_fixes/bracket_termination.py
#	autogpt/json_fixes/master_json_fix_method.py
#	autogpt/json_utils/json_fix_llm.py
#	autogpt/json_utils/utilities.py
pull/2032/head
bingokon 2023-04-18 00:01:58 +01:00
commit 31900f6733
63 changed files with 520 additions and 324 deletions

.env.template

@@ -5,8 +5,6 @@
 EXECUTE_LOCAL_COMMANDS=False
 # BROWSE_CHUNK_MAX_LENGTH - When browsing website, define the length of chunk stored in memory
 BROWSE_CHUNK_MAX_LENGTH=8192
-# BROWSE_SUMMARY_MAX_TOKEN - Define the maximum length of the summary generated by GPT agent when browsing website
-BROWSE_SUMMARY_MAX_TOKEN=300
 # USER_AGENT - Define the user-agent used by the requests library to browse website (string)
 # USER_AGENT="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"
 # AI_SETTINGS_FILE - Specifies which AI Settings file to use (defaults to ai_settings.yaml)
@@ -54,6 +52,7 @@ SMART_TOKEN_LIMIT=8000
 # local - Default
 # pinecone - Pinecone (if configured)
 # redis - Redis (if configured)
+# milvus - Milvus (if configured)
 MEMORY_BACKEND=local

 ### PINECONE
@@ -63,7 +62,7 @@ PINECONE_API_KEY=your-pinecone-api-key
 PINECONE_ENV=your-pinecone-region

 ### REDIS
-# REDIS_HOST - Redis host (Default: localhost)
+# REDIS_HOST - Redis host (Default: localhost, use "redis" for docker-compose)
 # REDIS_PORT - Redis port (Default: 6379)
 # REDIS_PASSWORD - Redis password (Default: "")
 # WIPE_REDIS_ON_START - Wipes data / index on start (Default: False)

.envrc

@@ -0,0 +1,4 @@
+# Upon entering directory, direnv requests user permission once to automatically load project dependencies onwards.
+# Eliminating the need of running "nix develop github:superherointj/nix-auto-gpt" for Nix users to develop/use Auto-GPT.
+[[ -z $IN_NIX_SHELL ]] && use flake github:superherointj/nix-auto-gpt

.flake8

@@ -1,12 +1,12 @@
 [flake8]
 max-line-length = 88
-extend-ignore = E203
+select = "E303, W293, W291, W292, E305, E231, E302"
 exclude =
     .tox,
     __pycache__,
     *.pyc,
     .env
-    venv/*
-    .venv/*
-    reports/*
-    dist/*
+    venv*/*,
+    .venv/*,
+    reports/*,
+    dist/*,

.github/PULL_REQUEST_TEMPLATE.md

@@ -30,4 +30,4 @@ By following these guidelines, your PRs are more likely to be merged quickly aft
 <!-- If you haven't added tests, please explain why. If you have, check the appropriate box. If you've ensured your PR is atomic and well-documented, check the corresponding boxes. -->
-<!-- By submitting this, I agree that my pull request should be closed if I do not fill this out or follow the guide lines. -->
+<!-- By submitting this, I agree that my pull request should be closed if I do not fill this out or follow the guidelines. -->

.github/workflows/ci.yml

@@ -32,7 +32,15 @@ jobs:
       - name: Lint with flake8
         continue-on-error: false
-        run: flake8 autogpt/ tests/ --select E303,W293,W291,W292,E305,E231,E302
+        run: flake8
+
+      - name: Check black formatting
+        continue-on-error: false
+        run: black . --check
+
+      - name: Check isort formatting
+        continue-on-error: false
+        run: isort . --check

       - name: Run unittest tests with coverage
         run: |

.github/workflows/pr-label.yml

@@ -0,0 +1,28 @@
+name: "Pull Request auto-label"
+on:
+  # So that PRs touching the same files as the push are updated
+  push:
+  # So that the `dirtyLabel` is removed if conflicts are resolve
+  # We recommend `pull_request_target` so that github secrets are available.
+  # In `pull_request` we wouldn't be able to change labels of fork PRs
+  pull_request_target:
+    types: [opened, synchronize]
+
+concurrency:
+  group: ${{ format('pr-label-{0}', github.event.pull_request.number || github.sha) }}
+  cancel-in-progress: true
+
+jobs:
+  conflicts:
+    runs-on: ubuntu-latest
+    permissions:
+      contents: read
+      pull-requests: write
+    steps:
+      - name: Update PRs with conflict labels
+        uses: eps1lon/actions-label-merge-conflict@releases/2.x
+        with:
+          dirtyLabel: "conflicts"
+          #removeOnDirtyLabel: "PR: ready to ship"
+          repoToken: "${{ secrets.GITHUB_TOKEN }}"
+          commentOnDirty: "This pull request has conflicts with the base branch, please resolve those so we can evaluate the pull request."
+          commentOnClean: "Conflicts have been resolved! 🎉 A maintainer will review the pull request shortly."

.gitignore

@@ -127,6 +127,7 @@ celerybeat.pid
 *.sage.py

 # Environments
+.direnv/
 .env
 .venv
 env/

.isort.cfg

@@ -1,10 +0,0 @@
-[settings]
-profile = black
-multi_line_output = 3
-include_trailing_comma = True
-force_grid_wrap = 0
-use_parentheses = True
-ensure_newline_before_comments = True
-line_length = 88
-skip = venv,env,node_modules,.env,.venv,dist
-sections = FUTURE,STDLIB,THIRDPARTY,FIRSTPARTY,LOCALFOLDER

.pre-commit-config.yaml

@@ -1,39 +1,32 @@
 repos:
-  - repo: https://github.com/sourcery-ai/sourcery
-    rev: v1.1.0   # Get the latest tag from https://github.com/sourcery-ai/sourcery/tags
-    hooks:
-      - id: sourcery
   - repo: https://github.com/pre-commit/pre-commit-hooks
     rev: v0.9.2
     hooks:
       - id: check-added-large-files
-        args: [ '--maxkb=500' ]
+        args: ['--maxkb=500']
       - id: check-byte-order-marker
      - id: check-case-conflict
       - id: check-merge-conflict
       - id: check-symlinks
       - id: debug-statements
-  - repo: local
+  - repo: https://github.com/pycqa/isort
+    rev: 5.12.0
     hooks:
       - id: isort
-        name: isort-local
-        entry: isort
-        language: python
-        types: [ python ]
-        exclude: .+/(dist|.venv|venv|build)/.+
-        pass_filenames: true
+        language_version: python3.10
+
+  - repo: https://github.com/psf/black
+    rev: 23.3.0
+    hooks:
       - id: black
-        name: black-local
-        entry: black
-        language: python
-        types: [ python ]
-        exclude: .+/(dist|.venv|venv|build)/.+
-        pass_filenames: true
+        language_version: python3.10
+
+  - repo: local
+    hooks:
       - id: pytest-check
         name: pytest-check
         entry: pytest --cov=autogpt --without-integration --without-slow-integration
         language: system
         pass_filenames: false
         always_run: true

Dockerfile

@@ -5,6 +5,16 @@ FROM python:3.11-slim
 RUN apt-get -y update
 RUN apt-get -y install git chromium-driver

+# Install Xvfb and other dependencies for headless browser testing
+RUN apt-get update \
+    && apt-get install -y wget gnupg2 libgtk-3-0 libdbus-glib-1-2 dbus-x11 xvfb ca-certificates
+
+# Install Firefox / Chromium
+RUN wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - \
+    && echo "deb [arch=amd64] http://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google-chrome.list \
+    && apt-get update \
+    && apt-get install -y chromium firefox-esr
+
 # Set environment variables
 ENV PIP_NO_CACHE_DIR=yes \
     PYTHONUNBUFFERED=1 \
@@ -17,8 +27,9 @@ RUN chown appuser:appuser /home/appuser
 USER appuser

 # Copy the requirements.txt file and install the requirements
-COPY --chown=appuser:appuser requirements-docker.txt .
-RUN pip install --no-cache-dir --user -r requirements-docker.txt
+COPY --chown=appuser:appuser requirements.txt .
+RUN sed -i '/Items below this point will not be included in the Docker Image/,$d' requirements.txt && \
+    pip install --no-cache-dir --user -r requirements.txt

 # Copy the application files
 COPY --chown=appuser:appuser autogpt/ ./autogpt

README.md

@@ -65,8 +65,21 @@ Development of this free, open-source project is made possible by all the <a hre
 - [Pinecone](https://www.pinecone.io/)
 - [Milvus](https://milvus.io/)
 - [Redis](https://redis.io)
+- [Weaviate](https://weaviate.io)
 - ElevenLabs Key (If you want the AI to speak)

+## ⚠️ OpenAI API Keys Configuration ⚠️
+
+Obtain your OpenAI API key from: https://platform.openai.com/account/api-keys.
+
+To use OpenAI API key for Auto-GPT, you **NEED** to have billing set up (AKA paid account).
+
+You can set up paid account at https://platform.openai.com/account/billing/overview.
+
+![For OpenAI API key to work, set up paid account at OpenAI API > Billing](./docs/imgs/openai-api-key-billing-paid-account.png)
+
+#### **PLEASE ENSURE YOU HAVE DONE THIS STEP BEFORE PROCEEDING, OTHERWISE NOTHING WILL WORK!**
+
 ## 💾 Installation

 To install Auto-GPT, follow these steps:
@@ -207,18 +220,6 @@ python -m autogpt --speak
 - Adam : pNInz6obpgDQGcFmaJgB
 - Sam : yoZ06aMxZJJ28mfd3POQ

-## OpenAI API Keys Configuration
-
-Obtain your OpenAI API key from: https://platform.openai.com/account/api-keys.
-
-To use OpenAI API key for Auto-GPT, you NEED to have billing set up (AKA paid account).
-
-You can set up paid account at https://platform.openai.com/account/billing/overview.
-
-![For OpenAI API key to work, set up paid account at OpenAI API > Billing](./docs/imgs/openai-api-key-billing-paid-account.png)
-
 ## 🔍 Google API Keys Configuration

 This section is optional, use the official google api if you are having issues with error 429 when running a google search.
@@ -325,7 +326,7 @@ export MEMORY_BACKEND="pinecone"
 ### Milvus Setup

-[Milvus](https://milvus.io/) is a open-source, high scalable vector database to storage huge amount of vector-based memory and provide fast relevant search.
+[Milvus](https://milvus.io/) is an open-source, highly scalable vector database to store huge amounts of vector-based memory and provide fast relevant search.

 - setup milvus database, keep your pymilvus version and milvus version same to avoid compatible issues.
   - setup by open source [Install Milvus](https://milvus.io/docs/install_standalone-operator.md)
@@ -341,6 +342,14 @@ export MEMORY_BACKEND="pinecone"
 [Weaviate](https://weaviate.io/) is an open-source vector database. It allows to store data objects and vector embeddings from ML-models and scales seamlessly to billion of data objects. [An instance of Weaviate can be created locally (using Docker), on Kubernetes or using Weaviate Cloud Services](https://weaviate.io/developers/weaviate/quickstart).
 Although still experimental, [Embedded Weaviate](https://weaviate.io/developers/weaviate/installation/embedded) is supported which allows the Auto-GPT process itself to start a Weaviate instance. To enable it, set `USE_WEAVIATE_EMBEDDED` to `True` and make sure you `pip install "weaviate-client>=3.15.4"`.

+#### Install the Weaviate client
+
+Install the Weaviate client before usage.
+
+```
+$ pip install weaviate-client
+```
+
 #### Setting up environment variables

 In your `.env` file set the following:
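For orientation (not part of the diff): based on the variables read by `autogpt/config/config.py` further down in this changeset, a Weaviate-backed `.env` would look roughly like the sketch below; the host and port are placeholder values.

```
MEMORY_BACKEND=weaviate
WEAVIATE_HOST=localhost
WEAVIATE_PORT=8080
WEAVIATE_PROTOCOL=http
USE_WEAVIATE_EMBEDDED=False
```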

autogpt/__main__.py

@@ -1,12 +1,15 @@
 """Main script for the autogpt package."""
 import logging
+
 from colorama import Fore
+
 from autogpt.agent.agent import Agent
 from autogpt.args import parse_arguments
 from autogpt.config import Config, check_openai_api_key
 from autogpt.logs import logger
 from autogpt.memory import get_memory
 from autogpt.prompt import construct_prompt
+
 # Load environment variables from .env file

autogpt/agent/agent.py

@@ -1,6 +1,6 @@
 from colorama import Fore, Style
+
 from autogpt.app import execute_command, get_command
-
 from autogpt.chat import chat_with_ai, create_chat_message
 from autogpt.config import Config
 from autogpt.json_utils.json_fix_llm import fix_json_using_multiple_techniques
@@ -84,7 +84,7 @@ class Agent:
             # Print Assistant thoughts
             if assistant_reply_json != {}:
-                validate_json(assistant_reply_json, 'llm_response_format_1')
+                validate_json(assistant_reply_json, "llm_response_format_1")
                 # Get command name and arguments
                 try:
                     print_assistant_thoughts(self.ai_name, assistant_reply_json)
@@ -115,9 +115,12 @@ class Agent:
                     console_input = clean_input(
                         Fore.MAGENTA + "Input:" + Style.RESET_ALL
                     )
-                    if console_input.lower().rstrip() == "y":
+                    if console_input.lower().strip() == "y":
                         user_input = "GENERATE NEXT COMMAND JSON"
                         break
+                    elif console_input.lower().strip() == "":
+                        print("Invalid input format.")
+                        continue
                     elif console_input.lower().startswith("y -"):
                         try:
                             self.next_action_count = abs(

autogpt/agent/agent_manager.py

@@ -1,8 +1,10 @@
 """Agent manager for managing GPT agents"""
 from __future__ import annotations

-from autogpt.llm_utils import create_chat_completion
+from typing import Union
+
 from autogpt.config.config import Singleton
+from autogpt.llm_utils import create_chat_completion


 class AgentManager(metaclass=Singleton):

autogpt/app.py

@@ -1,31 +1,36 @@
 """ Command and Control """
 import json
-from typing import List, NoReturn, Union, Dict
+from typing import Dict, List, NoReturn, Union

 from autogpt.agent.agent_manager import AgentManager
-from autogpt.commands.evaluate_code import evaluate_code
-from autogpt.commands.google_search import google_official_search, google_search
-from autogpt.commands.improve_code import improve_code
-from autogpt.commands.write_tests import write_tests
-from autogpt.config import Config
-from autogpt.commands.image_gen import generate_image
 from autogpt.commands.audio_text import read_audio_from_file
-from autogpt.commands.web_requests import scrape_links, scrape_text
-from autogpt.commands.execute_code import execute_python_file, execute_shell
+from autogpt.commands.evaluate_code import evaluate_code
+from autogpt.commands.execute_code import (
+    execute_python_file,
+    execute_shell,
+    execute_shell_popen,
+)
 from autogpt.commands.file_operations import (
     append_to_file,
     delete_file,
+    download_file,
     read_file,
     search_files,
     write_to_file,
-    download_file
 )
+from autogpt.commands.git_operations import clone_repository
+from autogpt.commands.google_search import google_official_search, google_search
+from autogpt.commands.image_gen import generate_image
+from autogpt.commands.improve_code import improve_code
+from autogpt.commands.twitter import send_tweet
+from autogpt.commands.web_requests import scrape_links, scrape_text
+from autogpt.commands.web_selenium import browse_website
+from autogpt.commands.write_tests import write_tests
+from autogpt.config import Config
+from autogpt.json_utils.json_fix_llm import fix_and_parse_json
 from autogpt.memory import get_memory
 from autogpt.processing.text import summarize_text
 from autogpt.speech import say_text
-from autogpt.commands.web_selenium import browse_website
-from autogpt.commands.git_operations import clone_repository
-from autogpt.commands.twitter import send_tweet

 CFG = Config()
 AGENT_MANAGER = AgentManager()
@@ -111,11 +116,10 @@ def execute_command(command_name: str, arguments):
         arguments (dict): The arguments for the command

     Returns:
-        str: The result of the command"""
-    memory = get_memory(CFG)
-
+        str: The result of the command
+    """
     try:
-        command_name = map_command_synonyms(command_name)
+        command_name = map_command_synonyms(command_name.lower())
         if command_name == "google":
             # Check if the Google API key is set and use the official search method
             # If the API key is not set or has only whitespaces, use the unofficial
@@ -129,12 +133,16 @@ def execute_command(command_name: str, arguments):
             # google_result can be a list or a string depending on the search results
             if isinstance(google_result, list):
-                safe_message = [google_result_single.encode('utf-8', 'ignore') for google_result_single in google_result]
+                safe_message = [
+                    google_result_single.encode("utf-8", "ignore")
+                    for google_result_single in google_result
+                ]
             else:
-                safe_message = google_result.encode('utf-8', 'ignore')
+                safe_message = google_result.encode("utf-8", "ignore")

-            return str(safe_message)
+            return safe_message.decode("utf-8")
         elif command_name == "memory_add":
+            memory = get_memory(CFG)
             return memory.add(arguments["string"])
         elif command_name == "start_agent":
             return start_agent(
@@ -190,6 +198,15 @@ def execute_command(command_name: str, arguments):
                     " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' "
                     "in your config. Do not attempt to bypass the restriction."
                 )
+        elif command_name == "execute_shell_popen":
+            if CFG.execute_local_commands:
+                return execute_shell_popen(arguments["command_line"])
+            else:
+                return (
+                    "You are not allowed to run local shell commands. To execute"
+                    " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' "
+                    "in your config. Do not attempt to bypass the restriction."
+                )
         elif command_name == "read_audio_from_file":
             return read_audio_from_file(arguments["file"])
         elif command_name == "generate_image":
@@ -211,7 +228,7 @@ def execute_command(command_name: str, arguments):

 def get_text_summary(url: str, question: str) -> str:
-    """Return the results of a google search
+    """Return the results of a Google search

     Args:
         url (str): The url to scrape
@@ -226,7 +243,7 @@ def get_text_summary(url: str, question: str) -> str:

 def get_hyperlinks(url: str) -> Union[str, List[str]]:
-    """Return the results of a google search
+    """Return the results of a Google search

     Args:
         url (str): The url to scrape
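A quick sketch of how the reworked dispatcher behaves (hypothetical invocation; assumes `autogpt` is importable and `EXECUTE_LOCAL_COMMANDS` is enabled in the config):

```python
from autogpt.app import execute_command

# Command names are now lower-cased and mapped through synonyms before
# matching, so a mixed-case name resolves to the same handler.
result = execute_command("Execute_Shell_Popen", {"command_line": "echo hello"})
print(result)  # e.g. "Subprocess started with PID:'12345'"
```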

autogpt/args.py

@@ -1,7 +1,8 @@
 """This module contains the argument parsing logic for the script."""
 import argparse

-from colorama import Fore, Back, Style
+from colorama import Back, Fore, Style
+
 from autogpt import utils
 from autogpt.config import Config
 from autogpt.logs import logger
@@ -64,10 +65,10 @@ def parse_arguments() -> None:
         " skip the re-prompt.",
     )
     parser.add_argument(
-        '--allow-downloads',
-        action='store_true',
-        dest='allow_downloads',
-        help='Dangerous: Allows Auto-GPT to download files natively.'
+        "--allow-downloads",
+        action="store_true",
+        dest="allow_downloads",
+        help="Dangerous: Allows Auto-GPT to download files natively.",
     )
     args = parser.parse_args()
@@ -141,10 +142,17 @@ def parse_arguments() -> None:
     if args.allow_downloads:
         logger.typewriter_log("Native Downloading:", Fore.GREEN, "ENABLED")
-        logger.typewriter_log("WARNING: ", Fore.YELLOW,
-            f"{Back.LIGHTYELLOW_EX}Auto-GPT will now be able to download and save files to your machine.{Back.RESET} " +
-            "It is recommended that you monitor any files it downloads carefully.")
-        logger.typewriter_log("WARNING: ", Fore.YELLOW, f"{Back.RED + Style.BRIGHT}ALWAYS REMEMBER TO NEVER OPEN FILES YOU AREN'T SURE OF!{Style.RESET_ALL}")
+        logger.typewriter_log(
+            "WARNING: ",
+            Fore.YELLOW,
+            f"{Back.LIGHTYELLOW_EX}Auto-GPT will now be able to download and save files to your machine.{Back.RESET} "
+            + "It is recommended that you monitor any files it downloads carefully.",
+        )
+        logger.typewriter_log(
+            "WARNING: ",
+            Fore.YELLOW,
+            f"{Back.RED + Style.BRIGHT}ALWAYS REMEMBER TO NEVER OPEN FILES YOU AREN'T SURE OF!{Style.RESET_ALL}",
+        )
         CFG.allow_downloads = True

     if args.browser_name:

autogpt/commands/audio_text.py

@@ -1,6 +1,7 @@
-import requests
 import json

+import requests
+
 from autogpt.config import Config
 from autogpt.workspace import path_in_workspace

autogpt/commands/execute_code.py

@@ -5,10 +5,10 @@ import subprocess
 import docker
 from docker.errors import ImageNotFound

-from autogpt.workspace import path_in_workspace, WORKSPACE_PATH
+from autogpt.workspace import WORKSPACE_PATH, path_in_workspace


-def execute_python_file(file: str):
+def execute_python_file(file: str) -> str:
     """Execute a Python file in a Docker container and return the output

     Args:
@@ -40,10 +40,10 @@ def execute_python_file(file: str):
     try:
         client = docker.from_env()

-        # You can replace 'python:3.8' with the desired Python image/version
+        # You can replace this with the desired Python image/version
         # You can find available Python images on Docker Hub:
         # https://hub.docker.com/_/python
-        image_name = "python:3.10"
+        image_name = "python:3-alpine"
         try:
             client.images.get(image_name)
             print(f"Image '{image_name}' found locally")
@@ -114,6 +114,36 @@ def execute_shell(command_line: str) -> str:
     return output


+def execute_shell_popen(command_line) -> str:
+    """Execute a shell command with Popen and returns an english description
+    of the event and the process id
+
+    Args:
+        command_line (str): The command line to execute
+
+    Returns:
+        str: Description of the fact that the process started and its id
+    """
+    current_dir = os.getcwd()
+
+    if WORKING_DIRECTORY not in current_dir:  # Change dir into workspace if necessary
+        work_dir = os.path.join(os.getcwd(), WORKING_DIRECTORY)
+        os.chdir(work_dir)
+
+    print(f"Executing command '{command_line}' in working directory '{os.getcwd()}'")
+
+    do_not_show_output = subprocess.DEVNULL
+    process = subprocess.Popen(
+        command_line, shell=True, stdout=do_not_show_output, stderr=do_not_show_output
+    )
+
+    # Change back to whatever the prior working dir was
+    os.chdir(current_dir)
+
+    return f"Subprocess started with PID:'{str(process.pid)}'"
+
+
 def we_are_running_in_a_docker_container() -> bool:
     """Check if we are running in a Docker container

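The new `execute_shell_popen` returns as soon as the child process is spawned instead of blocking on its exit. A minimal standalone illustration of the same pattern (a sketch, not the repository's code):

```python
import subprocess

# Popen returns immediately after spawning the child; its output is
# discarded, and only the PID is reported back to the caller.
process = subprocess.Popen(
    "sleep 60",
    shell=True,
    stdout=subprocess.DEVNULL,
    stderr=subprocess.DEVNULL,
)
print(f"Subprocess started with PID:'{process.pid}'")
```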
autogpt/commands/file_operations.py

@@ -5,14 +5,14 @@ import os
 import os.path
 from pathlib import Path
 from typing import Generator, List

 import requests
-from requests.adapters import HTTPAdapter
-from requests.adapters import Retry
-from colorama import Fore, Back
+from colorama import Back, Fore
+from requests.adapters import HTTPAdapter, Retry

 from autogpt.spinner import Spinner
 from autogpt.utils import readable_file_size
-from autogpt.workspace import path_in_workspace, WORKSPACE_PATH
+from autogpt.workspace import WORKSPACE_PATH, path_in_workspace

 LOG_FILE = "file_logger.txt"
 LOG_FILE_PATH = WORKSPACE_PATH / LOG_FILE
@@ -47,7 +47,7 @@ def log_operation(operation: str, filename: str) -> None:
         with open(LOG_FILE_PATH, "w", encoding="utf-8") as f:
             f.write("File Operation Logger ")

-    append_to_file(LOG_FILE, log_entry, shouldLog = False)
+    append_to_file(LOG_FILE, log_entry, shouldLog=False)


 def split_file(
@@ -70,9 +70,14 @@ def split_file(
     while start < content_length:
         end = start + max_length
         if end + overlap < content_length:
-            chunk = content[start : end + overlap]
+            chunk = content[start : end + overlap - 1]
         else:
             chunk = content[start:content_length]
+
+            # Account for the case where the last chunk is shorter than the overlap, so it has already been consumed
+            if len(chunk) <= overlap:
+                break
+
         yield chunk
         start += max_length - overlap
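To see what the `end + overlap - 1` change and the early `break` do, here is a simplified restatement of the patched generator (assuming the `content`/`max_length`/`overlap` parameters used above):

```python
def split(content: str, max_length: int, overlap: int):
    # Simplified version of the patched split_file generator above.
    start, length = 0, len(content)
    while start < length:
        end = start + max_length
        if end + overlap < length:
            chunk = content[start : end + overlap - 1]
        else:
            chunk = content[start:length]
            # A tail no longer than `overlap` was already covered by the
            # previous chunk, so it is skipped instead of re-yielded.
            if len(chunk) <= overlap:
                break
        yield chunk
        start += max_length - overlap


print(list(split("abcdefghijklmnopqrstuv", max_length=8, overlap=3)))
# ['abcdefghij', 'fghijklmno', 'klmnopqrst', 'pqrstuv']
```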
@@ -236,23 +241,23 @@ def download_file(url, filename):
         session = requests.Session()
         retry = Retry(total=3, backoff_factor=1, status_forcelist=[502, 503, 504])
         adapter = HTTPAdapter(max_retries=retry)
-        session.mount('http://', adapter)
-        session.mount('https://', adapter)
+        session.mount("http://", adapter)
+        session.mount("https://", adapter)

         total_size = 0
         downloaded_size = 0

         with session.get(url, allow_redirects=True, stream=True) as r:
             r.raise_for_status()
-            total_size = int(r.headers.get('Content-Length', 0))
+            total_size = int(r.headers.get("Content-Length", 0))
             downloaded_size = 0

-            with open(safe_filename, 'wb') as f:
+            with open(safe_filename, "wb") as f:
                 for chunk in r.iter_content(chunk_size=8192):
                     f.write(chunk)
                     downloaded_size += len(chunk)

                     # Update the progress message
                     progress = f"{readable_file_size(downloaded_size)} / {readable_file_size(total_size)}"
                     spinner.update_message(f"{message} {progress}")

autogpt/commands/git_operations.py

@@ -1,5 +1,6 @@
 """Git operations for autogpt"""
 import git
+
 from autogpt.config import Config
 from autogpt.workspace import path_in_workspace
@@ -7,7 +8,7 @@ CFG = Config()

 def clone_repository(repo_url: str, clone_path: str) -> str:
-    """Clone a github repository locally
+    """Clone a GitHub repository locally

     Args:
         repo_url (str): The URL of the repository to clone

autogpt/commands/google_search.py

@@ -11,7 +11,7 @@ CFG = Config()

 def google_search(query: str, num_results: int = 8) -> str:
-    """Return the results of a google search
+    """Return the results of a Google search

     Args:
         query (str): The search query.
@@ -35,7 +35,7 @@ def google_search(query: str, num_results: int = 8) -> str:

 def google_official_search(query: str, num_results: int = 8) -> str | list[str]:
-    """Return the results of a google search using the official Google API
+    """Return the results of a Google search using the official Google API

     Args:
         query (str): The search query.

autogpt/commands/image_gen.py

@@ -7,6 +7,7 @@ from base64 import b64decode
 import openai
 import requests
 from PIL import Image
+
 from autogpt.config import Config
 from autogpt.workspace import path_in_workspace

autogpt/commands/twitter.py

@@ -1,5 +1,6 @@
-import tweepy
 import os

+import tweepy
 from dotenv import load_dotenv

 load_dotenv()

autogpt/commands/web_playwright.py

@@ -8,6 +8,7 @@ except ImportError:
         "Playwright not installed. Please install it with 'pip install playwright' to use."
     )
 from bs4 import BeautifulSoup
+
 from autogpt.processing.html import extract_hyperlinks, format_hyperlinks

autogpt/commands/web_requests.py

@@ -4,9 +4,9 @@ from __future__ import annotations
 from urllib.parse import urljoin, urlparse

 import requests
-from requests.compat import urljoin
-from requests import Response
 from bs4 import BeautifulSoup
+from requests import Response
+from requests.compat import urljoin

 from autogpt.config import Config
 from autogpt.memory import get_memory
@@ -58,9 +58,28 @@ def check_local_file_access(url: str) -> bool:
     """
     local_prefixes = [
         "file:///",
+        "file://localhost/",
         "file://localhost",
         "http://localhost",
+        "http://localhost/",
         "https://localhost",
+        "https://localhost/",
+        "http://2130706433",
+        "http://2130706433/",
+        "https://2130706433",
+        "https://2130706433/",
+        "http://127.0.0.1/",
+        "http://127.0.0.1",
+        "https://127.0.0.1/",
+        "https://127.0.0.1",
+        "https://0.0.0.0/",
+        "https://0.0.0.0",
+        "http://0.0.0.0/",
+        "http://0.0.0.0",
+        "http://0000",
+        "http://0000/",
+        "https://0000",
+        "https://0000/",
     ]
     return any(url.startswith(prefix) for prefix in local_prefixes)
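For context on the less obvious entries above: `2130706433` is the loopback address written as a bare decimal integer, a classic trick for slipping past naive string-based blocklists:

```python
import ipaddress

# 127.0.0.1 expressed as a single decimal integer
print(int(ipaddress.IPv4Address("127.0.0.1")))  # 2130706433
# ...and converted back
print(ipaddress.IPv4Address(2130706433))        # 127.0.0.1
```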

autogpt/commands/web_selenium.py

@@ -1,22 +1,25 @@
 """Selenium web scraping module."""
 from __future__ import annotations

-from selenium import webdriver
-from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
-import autogpt.processing.text as summary
-from bs4 import BeautifulSoup
-from selenium.webdriver.remote.webdriver import WebDriver
-from selenium.webdriver.common.by import By
-from selenium.webdriver.support.wait import WebDriverWait
-from selenium.webdriver.support import expected_conditions as EC
-from webdriver_manager.chrome import ChromeDriverManager
-from webdriver_manager.firefox import GeckoDriverManager
-from selenium.webdriver.chrome.options import Options as ChromeOptions
-from selenium.webdriver.firefox.options import Options as FirefoxOptions
-from selenium.webdriver.safari.options import Options as SafariOptions
 import logging
 from pathlib import Path
+from sys import platform
+
+from bs4 import BeautifulSoup
+from selenium import webdriver
+from selenium.webdriver.chrome.options import Options as ChromeOptions
+from selenium.webdriver.common.by import By
+from selenium.webdriver.firefox.options import Options as FirefoxOptions
+from selenium.webdriver.remote.webdriver import WebDriver
+from selenium.webdriver.safari.options import Options as SafariOptions
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.support.wait import WebDriverWait
+from webdriver_manager.chrome import ChromeDriverManager
+from webdriver_manager.firefox import GeckoDriverManager

+import autogpt.processing.text as summary
 from autogpt.config import Config
+from autogpt.processing.html import extract_hyperlinks, format_hyperlinks

 FILE_DIR = Path(__file__).parent.parent
 CFG = Config()
@@ -75,6 +78,9 @@ def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]:
         # See https://developer.apple.com/documentation/webkit/testing_with_webdriver_in_safari
         driver = webdriver.Safari(options=options)
     else:
+        if platform == "linux" or platform == "linux2":
+            options.add_argument("--disable-dev-shm-usage")
+            options.add_argument("--remote-debugging-port=9222")
         options.add_argument("--no-sandbox")
         driver = webdriver.Chrome(
             executable_path=ChromeDriverManager().install(), options=options
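In isolation, the Linux-specific options added above amount to the following sketch (flag semantics per standard Chromium behavior: `--disable-dev-shm-usage` makes Chrome use `/tmp` instead of the often-tiny `/dev/shm` inside containers):

```python
from sys import platform

from selenium.webdriver.chrome.options import Options as ChromeOptions

options = ChromeOptions()
if platform in ("linux", "linux2"):
    options.add_argument("--disable-dev-shm-usage")       # avoid small /dev/shm in containers
    options.add_argument("--remote-debugging-port=9222")  # pin the DevTools port
options.add_argument("--no-sandbox")
```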


@@ -2,6 +2,7 @@
 from __future__ import annotations

 import json
+
 from autogpt.llm_utils import call_ai_function

autogpt/config/__init__.py

@@ -2,7 +2,7 @@
 This module contains the configuration classes for AutoGPT.
 """
 from autogpt.config.ai_config import AIConfig
-from autogpt.config.config import check_openai_api_key, Config
+from autogpt.config.config import Config, check_openai_api_key
 from autogpt.config.singleton import AbstractSingleton, Singleton

 __all__ = [

autogpt/config/ai_config.py

@@ -6,6 +6,7 @@ from __future__ import annotations
 import os
 from typing import Type
+
 import yaml

autogpt/config/config.py

@@ -1,14 +1,13 @@
 """Configuration class to store the state of bools for different scripts access."""
 import os

-from colorama import Fore
-from autogpt.config.singleton import Singleton
 import openai
 import yaml
+from colorama import Fore
 from dotenv import load_dotenv

+from autogpt.config.singleton import Singleton
+
 load_dotenv(verbose=True)
@@ -33,7 +32,6 @@ class Config(metaclass=Singleton):
         self.fast_token_limit = int(os.getenv("FAST_TOKEN_LIMIT", 4000))
         self.smart_token_limit = int(os.getenv("SMART_TOKEN_LIMIT", 8000))
         self.browse_chunk_max_length = int(os.getenv("BROWSE_CHUNK_MAX_LENGTH", 8192))
-        self.browse_summary_max_token = int(os.getenv("BROWSE_SUMMARY_MAX_TOKEN", 300))
         self.openai_api_key = os.getenv("OPENAI_API_KEY")
         self.temperature = float(os.getenv("TEMPERATURE", "1"))
@@ -67,7 +65,7 @@ class Config(metaclass=Singleton):
         self.pinecone_api_key = os.getenv("PINECONE_API_KEY")
         self.pinecone_region = os.getenv("PINECONE_ENV")

         self.weaviate_host = os.getenv("WEAVIATE_HOST")
         self.weaviate_port = os.getenv("WEAVIATE_PORT")
         self.weaviate_protocol = os.getenv("WEAVIATE_PROTOCOL", "http")
         self.weaviate_username = os.getenv("WEAVIATE_USERNAME", None)
@@ -75,7 +73,9 @@ class Config(metaclass=Singleton):
         self.weaviate_scopes = os.getenv("WEAVIATE_SCOPES", None)
         self.weaviate_embedded_path = os.getenv("WEAVIATE_EMBEDDED_PATH")
         self.weaviate_api_key = os.getenv("WEAVIATE_API_KEY", None)
-        self.use_weaviate_embedded = os.getenv("USE_WEAVIATE_EMBEDDED", "False") == "True"
+        self.use_weaviate_embedded = (
+            os.getenv("USE_WEAVIATE_EMBEDDED", "False") == "True"
+        )

         # milvus configuration, e.g., localhost:19530.
         self.milvus_addr = os.getenv("MILVUS_ADDR", "localhost:19530")
@@ -188,10 +188,6 @@ class Config(metaclass=Singleton):
         """Set the browse_website command chunk max length value."""
         self.browse_chunk_max_length = value

-    def set_browse_summary_max_token(self, value: int) -> None:
-        """Set the browse_website command summary max token value."""
-        self.browse_summary_max_token = value
-
     def set_openai_api_key(self, value: str) -> None:
         """Set the OpenAI API key value."""
         self.openai_api_key = value
@@ -237,5 +233,5 @@ def check_openai_api_key() -> None:
             Fore.RED
             + "Please set your OpenAI API key in .env or as an environment variable."
         )
-        print("You can get your key from https://beta.openai.com/account/api-keys")
+        print("You can get your key from https://platform.openai.com/account/api-keys")
         exit(1)

autogpt/json_utils/utilities.py

@@ -42,7 +42,8 @@ def validate_json(json_object: object, schema_name: object) -> object:
         logger.error("The JSON object is invalid.")
         if CFG.debug_mode:
             logger.error(
-                json.dumps(json_object, indent=4))  # Replace 'json_object' with the variable containing the JSON data
+                json.dumps(json_object, indent=4)
+            )  # Replace 'json_object' with the variable containing the JSON data
             logger.error("The following issues were found:")

             for error in errors:

autogpt/llm_utils.py

@@ -1,13 +1,14 @@
 from __future__ import annotations

-from ast import List
 import time
+from ast import List

 import openai
+from colorama import Fore, Style
 from openai.error import APIError, RateLimitError
-from colorama import Fore

 from autogpt.config import Config
+from autogpt.logs import logger

 CFG = Config()
@@ -70,6 +71,7 @@ def create_chat_completion(
     """
     response = None
     num_retries = 10
+    warned_user = False
     if CFG.debug_mode:
         print(
             Fore.GREEN
@@ -101,6 +103,12 @@ def create_chat_completion(
                     Fore.RED + "Error: ",
                     f"Reached rate limit, passing..." + Fore.RESET,
                 )
+            if not warned_user:
+                logger.double_check(
+                    f"Please double check that you have setup a {Fore.CYAN + Style.BRIGHT}PAID{Style.RESET_ALL} OpenAI API Account. "
+                    + f"You can read more here: {Fore.CYAN}https://github.com/Significant-Gravitas/Auto-GPT#openai-api-keys-configuration{Fore.RESET}"
+                )
+                warned_user = True
         except APIError as e:
             if e.http_status == 502:
                 pass
@@ -115,13 +123,23 @@ def create_chat_completion(
                 )
             time.sleep(backoff)
     if response is None:
-        raise RuntimeError(f"Failed to get response after {num_retries} retries")
+        logger.typewriter_log(
+            "FAILED TO GET RESPONSE FROM OPENAI",
+            Fore.RED,
+            "Auto-GPT has failed to get a response from OpenAI's services. "
+            + f"Try running Auto-GPT again, and if the problem the persists try running it with `{Fore.CYAN}--debug{Fore.RESET}`.",
+        )
+        logger.double_check()
+        if CFG.debug_mode:
+            raise RuntimeError(f"Failed to get response after {num_retries} retries")
+        else:
+            quit(1)

     return response.choices[0].message["content"]


 def create_embedding_with_ada(text) -> list:
-    """Create a embedding with text-ada-002 using the OpenAI SDK"""
+    """Create an embedding with text-ada-002 using the OpenAI SDK"""
     num_retries = 10
     for attempt in range(num_retries):
         backoff = 2 ** (attempt + 2)
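The retry loops above sleep `2 ** (attempt + 2)` seconds between attempts, so the wait doubles from a 4-second floor:

```python
# Backoff schedule produced by the retry loops above
print([2 ** (attempt + 2) for attempt in range(5)])  # [4, 8, 16, 32, 64]
```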

autogpt/logs.py

@@ -5,13 +5,13 @@ import os
 import random
 import re
 import time
-from logging import LogRecord
 import traceback
+from logging import LogRecord

 from colorama import Fore, Style

-from autogpt.speech import say_text
 from autogpt.config import Config, Singleton
+from autogpt.speech import say_text

 CFG = Config()
@@ -47,7 +47,7 @@ class Logger(metaclass=Singleton):
         # Info handler in activity.log
         self.file_handler = logging.FileHandler(
-            os.path.join(log_dir, log_file), 'a', 'utf-8'
+            os.path.join(log_dir, log_file), "a", "utf-8"
         )
         self.file_handler.setLevel(logging.DEBUG)
         info_formatter = AutoGptFormatter(
@@ -57,7 +57,7 @@ class Logger(metaclass=Singleton):
         # Error handler error.log
         error_handler = logging.FileHandler(
-            os.path.join(log_dir, error_file), 'a', 'utf-8'
+            os.path.join(log_dir, error_file), "a", "utf-8"
         )
         error_handler.setLevel(logging.ERROR)
         error_formatter = AutoGptFormatter(
@@ -79,7 +79,7 @@ class Logger(metaclass=Singleton):
         self.logger.setLevel(logging.DEBUG)

     def typewriter_log(
         self, title="", title_color="", content="", speak_text=False, level=logging.INFO
     ):
         if speak_text and CFG.speak_mode:
             say_text(f"{title}. {content}")
@@ -95,18 +95,18 @@ class Logger(metaclass=Singleton):
         )

     def debug(
-            self,
-            message,
-            title="",
-            title_color="",
+        self,
+        message,
+        title="",
+        title_color="",
     ):
         self._log(title, title_color, message, logging.DEBUG)

     def warn(
-            self,
-            message,
-            title="",
-            title_color="",
+        self,
+        message,
+        title="",
+        title_color="",
     ):
         self._log(title, title_color, message, logging.WARN)
@@ -180,10 +180,10 @@ class AutoGptFormatter(logging.Formatter):
     def format(self, record: LogRecord) -> str:
         if hasattr(record, "color"):
             record.title_color = (
-                    getattr(record, "color")
-                    + getattr(record, "title")
-                    + " "
-                    + Style.RESET_ALL
+                getattr(record, "color")
+                + getattr(record, "title")
+                + " "
+                + Style.RESET_ALL
             )
         else:
             record.title_color = getattr(record, "title")
@@ -291,7 +291,9 @@ def print_assistant_thoughts(ai_name, assistant_reply):
         logger.error("Error: \n", call_stack)


-def print_assistant_thoughts(ai_name: object, assistant_reply_json_valid: object) -> None:
+def print_assistant_thoughts(
+    ai_name: object, assistant_reply_json_valid: object
+) -> None:
     assistant_thoughts_reasoning = None
     assistant_thoughts_plan = None
     assistant_thoughts_speak = None
@@ -307,9 +309,7 @@ def print_assistant_thoughts(ai_name: object, assistant_reply_json_valid: object
     logger.typewriter_log(
         f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, f"{assistant_thoughts_text}"
     )
-    logger.typewriter_log(
-        "REASONING:", Fore.YELLOW, f"{assistant_thoughts_reasoning}"
-    )
+    logger.typewriter_log("REASONING:", Fore.YELLOW, f"{assistant_thoughts_reasoning}")

     if assistant_thoughts_plan:
         logger.typewriter_log("PLAN:", Fore.YELLOW, "")
         # If it's a list, join it into a string
@@ -323,9 +323,7 @@ def print_assistant_thoughts(ai_name: object, assistant_reply_json_valid: object
         for line in lines:
             line = line.lstrip("- ")
             logger.typewriter_log("- ", Fore.GREEN, line.strip())
-    logger.typewriter_log(
-        "CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}"
-    )
+    logger.typewriter_log("CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}")

     # Speak the assistant's thoughts
     if CFG.speak_mode and assistant_thoughts_speak:
         say_text(assistant_thoughts_speak)

autogpt/memory/__init__.py

@@ -60,8 +60,10 @@ def get_memory(cfg, init=False):
             memory = RedisMemory(cfg)
     elif cfg.memory_backend == "weaviate":
         if not WeaviateMemory:
-            print("Error: Weaviate is not installed. Please install weaviate-client to"
-                  " use Weaviate as a memory backend.")
+            print(
+                "Error: Weaviate is not installed. Please install weaviate-client to"
+                " use Weaviate as a memory backend."
+            )
         else:
             memory = WeaviateMemory(cfg)
     elif cfg.memory_backend == "milvus":
@@ -93,5 +95,5 @@ __all__ = [
     "PineconeMemory",
     "NoMemory",
     "MilvusMemory",
-    "WeaviateMemory"
+    "WeaviateMemory",
 ]

autogpt/memory/local.py

@@ -2,13 +2,13 @@ from __future__ import annotations

 import dataclasses
 import os
-from typing import Any
+from typing import Any, List

 import numpy as np
 import orjson

-from autogpt.memory.base import MemoryProviderSingleton
 from autogpt.llm_utils import create_embedding_with_ada
+from autogpt.memory.base import MemoryProviderSingleton

 EMBED_DIM = 1536
 SAVE_OPTIONS = orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_SERIALIZE_DATACLASS

autogpt/memory/milvus.py

@@ -1,11 +1,5 @@
 """ Milvus memory storage provider."""
-from pymilvus import (
-    connections,
-    FieldSchema,
-    CollectionSchema,
-    DataType,
-    Collection,
-)
+from pymilvus import Collection, CollectionSchema, DataType, FieldSchema, connections

 from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding

@@ -46,7 +40,7 @@ class MilvusMemory(MemoryProviderSingleton):
         self.collection.load()

     def add(self, data) -> str:
-        """Add a embedding of data into memory.
+        """Add an embedding of data into memory.

         Args:
             data (str): The raw text to construct embedding index.

autogpt/memory/no_memory.py

@@ -53,7 +53,7 @@ class NoMemory(MemoryProviderSingleton):
         """
         return ""

-    def get_relevant(self, data: str, num_relevant: int = 5) ->list[Any] | None:
+    def get_relevant(self, data: str, num_relevant: int = 5) -> list[Any] | None:
         """
         Returns all the data in the memory that is relevant to the given data.
         NoMemory always returns None.

autogpt/memory/pinecone.py

@@ -1,9 +1,9 @@
 import pinecone
 from colorama import Fore, Style

+from autogpt.llm_utils import create_embedding_with_ada
 from autogpt.logs import logger
 from autogpt.memory.base import MemoryProviderSingleton
-from autogpt.llm_utils import create_embedding_with_ada


 class PineconeMemory(MemoryProviderSingleton):

autogpt/memory/redismem.py

@@ -10,9 +10,9 @@ from redis.commands.search.field import TextField, VectorField
 from redis.commands.search.indexDefinition import IndexDefinition, IndexType
 from redis.commands.search.query import Query

+from autogpt.llm_utils import create_embedding_with_ada
 from autogpt.logs import logger
 from autogpt.memory.base import MemoryProviderSingleton
-from autogpt.llm_utils import create_embedding_with_ada

 SCHEMA = [
     TextField("data"),

autogpt/memory/weaviate.py

@ -1,11 +1,13 @@
from autogpt.config import Config
from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding
import uuid import uuid
import weaviate import weaviate
from weaviate import Client from weaviate import Client
from weaviate.embedded import EmbeddedOptions from weaviate.embedded import EmbeddedOptions
from weaviate.util import generate_uuid5 from weaviate.util import generate_uuid5
from autogpt.config import Config
from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding
def default_schema(weaviate_index): def default_schema(weaviate_index):
return { return {
@ -14,7 +16,7 @@ def default_schema(weaviate_index):
{ {
"name": "raw_text", "name": "raw_text",
"dataType": ["text"], "dataType": ["text"],
"description": "original text for the embedding" "description": "original text for the embedding",
} }
], ],
} }
@ -24,22 +26,35 @@ class WeaviateMemory(MemoryProviderSingleton):
def __init__(self, cfg): def __init__(self, cfg):
auth_credentials = self._build_auth_credentials(cfg) auth_credentials = self._build_auth_credentials(cfg)
url = f'{cfg.weaviate_protocol}://{cfg.weaviate_host}:{cfg.weaviate_port}' url = f"{cfg.weaviate_protocol}://{cfg.weaviate_host}:{cfg.weaviate_port}"
if cfg.use_weaviate_embedded: if cfg.use_weaviate_embedded:
self.client = Client(embedded_options=EmbeddedOptions( self.client = Client(
hostname=cfg.weaviate_host, embedded_options=EmbeddedOptions(
port=int(cfg.weaviate_port), hostname=cfg.weaviate_host,
persistence_data_path=cfg.weaviate_embedded_path port=int(cfg.weaviate_port),
)) persistence_data_path=cfg.weaviate_embedded_path,
)
)
print(f"Weaviate Embedded running on: {url} with persistence path: {cfg.weaviate_embedded_path}") print(
f"Weaviate Embedded running on: {url} with persistence path: {cfg.weaviate_embedded_path}"
)
else: else:
self.client = Client(url, auth_client_secret=auth_credentials) self.client = Client(url, auth_client_secret=auth_credentials)
self.index = cfg.memory_index self.index = WeaviateMemory.format_classname(cfg.memory_index)
self._create_schema() self._create_schema()
@staticmethod
def format_classname(index):
# weaviate uses capitalised index names
# The python client uses the following code to format
# index names before the corresponding class is created
if len(index) == 1:
return index.capitalize()
return index[0].capitalize() + index[1:]
def _create_schema(self): def _create_schema(self):
schema = default_schema(self.index) schema = default_schema(self.index)
if not self.client.schema.contains(schema): if not self.client.schema.contains(schema):
@ -47,7 +62,9 @@ class WeaviateMemory(MemoryProviderSingleton):
def _build_auth_credentials(self, cfg): def _build_auth_credentials(self, cfg):
if cfg.weaviate_username and cfg.weaviate_password: if cfg.weaviate_username and cfg.weaviate_password:
return weaviate.AuthClientPassword(cfg.weaviate_username, cfg.weaviate_password) return weaviate.AuthClientPassword(
cfg.weaviate_username, cfg.weaviate_password
)
if cfg.weaviate_api_key: if cfg.weaviate_api_key:
return weaviate.AuthApiKey(api_key=cfg.weaviate_api_key) return weaviate.AuthApiKey(api_key=cfg.weaviate_api_key)
else: else:
@ -57,16 +74,14 @@ class WeaviateMemory(MemoryProviderSingleton):
vector = get_ada_embedding(data) vector = get_ada_embedding(data)
doc_uuid = generate_uuid5(data, self.index) doc_uuid = generate_uuid5(data, self.index)
data_object = { data_object = {"raw_text": data}
'raw_text': data
}
with self.client.batch as batch: with self.client.batch as batch:
batch.add_data_object( batch.add_data_object(
uuid=doc_uuid, uuid=doc_uuid,
data_object=data_object, data_object=data_object,
class_name=self.index, class_name=self.index,
vector=vector vector=vector,
) )
return f"Inserting data into memory at uuid: {doc_uuid}:\n data: {data}" return f"Inserting data into memory at uuid: {doc_uuid}:\n data: {data}"
@ -82,29 +97,31 @@ class WeaviateMemory(MemoryProviderSingleton):
# after a call to delete_all # after a call to delete_all
self._create_schema() self._create_schema()
return 'Obliterated' return "Obliterated"
def get_relevant(self, data, num_relevant=5): def get_relevant(self, data, num_relevant=5):
query_embedding = get_ada_embedding(data) query_embedding = get_ada_embedding(data)
try: try:
results = self.client.query.get(self.index, ['raw_text']) \ results = (
.with_near_vector({'vector': query_embedding, 'certainty': 0.7}) \ self.client.query.get(self.index, ["raw_text"])
.with_limit(num_relevant) \ .with_near_vector({"vector": query_embedding, "certainty": 0.7})
.do() .with_limit(num_relevant)
.do()
)
if len(results['data']['Get'][self.index]) > 0: if len(results["data"]["Get"][self.index]) > 0:
return [str(item['raw_text']) for item in results['data']['Get'][self.index]] return [
str(item["raw_text"]) for item in results["data"]["Get"][self.index]
]
else: else:
return [] return []
except Exception as err: except Exception as err:
print(f'Unexpected error {err=}, {type(err)=}') print(f"Unexpected error {err=}, {type(err)=}")
return [] return []
def get_stats(self): def get_stats(self):
result = self.client.query.aggregate(self.index) \ result = self.client.query.aggregate(self.index).with_meta_count().do()
.with_meta_count() \ class_data = result["data"]["Aggregate"][self.index]
.do()
class_data = result['data']['Aggregate'][self.index]
return class_data[0]['meta'] if class_data else {} return class_data[0]["meta"] if class_data else {}
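Taken together, the provider supports a simple round trip. A minimal usage sketch, assuming a Config populated with the WEAVIATE_* settings from the env template (outputs in comments are expected shapes, not guaranteed values):

from autogpt.config import Config
from autogpt.memory.weaviate import WeaviateMemory

memory = WeaviateMemory(Config())
memory.add("Paris is the capital of France")  # stored under a deterministic uuid5
memory.get_relevant("French capital", 1)      # expected: ["Paris is the capital of France"]
memory.get_stats()                            # expected: {"count": 1, ...}
memory.clear()                                # deletes and recreates the class schema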
View File
@ -1,8 +1,8 @@
"""HTML processing functions""" """HTML processing functions"""
from __future__ import annotations from __future__ import annotations
from requests.compat import urljoin
from bs4 import BeautifulSoup from bs4 import BeautifulSoup
from requests.compat import urljoin
def extract_hyperlinks(soup: BeautifulSoup, base_url: str) -> list[tuple[str, str]]: def extract_hyperlinks(soup: BeautifulSoup, base_url: str) -> list[tuple[str, str]]:
View File
@ -1,9 +1,11 @@
"""Text processing functions""" """Text processing functions"""
from typing import Generator, Optional, Dict from typing import Dict, Generator, Optional
from selenium.webdriver.remote.webdriver import WebDriver from selenium.webdriver.remote.webdriver import WebDriver
from autogpt.memory import get_memory
from autogpt.config import Config from autogpt.config import Config
from autogpt.llm_utils import create_chat_completion from autogpt.llm_utils import create_chat_completion
from autogpt.memory import get_memory
CFG = Config() CFG = Config()
MEMORY = get_memory(CFG) MEMORY = get_memory(CFG)
@ -78,7 +80,6 @@ def summarize_text(
summary = create_chat_completion( summary = create_chat_completion(
model=CFG.fast_llm_model, model=CFG.fast_llm_model,
messages=messages, messages=messages,
max_tokens=CFG.browse_summary_max_token,
) )
summaries.append(summary) summaries.append(summary)
print(f"Added chunk {i + 1} summary to memory") print(f"Added chunk {i + 1} summary to memory")
@ -95,7 +96,6 @@ def summarize_text(
return create_chat_completion( return create_chat_completion(
model=CFG.fast_llm_model, model=CFG.fast_llm_model,
messages=messages, messages=messages,
max_tokens=CFG.browse_summary_max_token,
) )
View File
@ -1,9 +1,10 @@
from colorama import Fore from colorama import Fore
from autogpt.config import Config
from autogpt.config.ai_config import AIConfig from autogpt.config.ai_config import AIConfig
from autogpt.config.config import Config from autogpt.config.config import Config
from autogpt.logs import logger from autogpt.logs import logger
from autogpt.promptgenerator import PromptGenerator from autogpt.promptgenerator import PromptGenerator
from autogpt.config import Config
from autogpt.setup import prompt_user from autogpt.setup import prompt_user
from autogpt.utils import clean_input from autogpt.utils import clean_input
@ -38,6 +39,9 @@ def get_prompt() -> str:
prompt_generator.add_constraint( prompt_generator.add_constraint(
'Exclusively use the commands listed in double quotes e.g. "command name"' 'Exclusively use the commands listed in double quotes e.g. "command name"'
) )
prompt_generator.add_constraint(
"Use subprocesses for commands that will not terminate within a few minutes"
)
# Define the command list # Define the command list
commands = [ commands = [
@ -81,6 +85,7 @@ def get_prompt() -> str:
{"code": "<full_code_string>", "focus": "<list_of_focus_areas>"}, {"code": "<full_code_string>", "focus": "<list_of_focus_areas>"},
), ),
("Execute Python File", "execute_python_file", {"file": "<file>"}), ("Execute Python File", "execute_python_file", {"file": "<file>"}),
("Task Complete (Shutdown)", "task_complete", {"reason": "<reason>"}),
("Generate Image", "generate_image", {"prompt": "<prompt>"}), ("Generate Image", "generate_image", {"prompt": "<prompt>"}),
("Send Tweet", "send_tweet", {"text": "<text>"}), ("Send Tweet", "send_tweet", {"text": "<text>"}),
] ]
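Each tuple above is a (label, command_name, arguments) triple that get_prompt feeds to prompt_generator.add_command; the model then sees a numbered command menu built from those fields. Roughly, and treating the exact rendering as the PromptGenerator's concern:

# ("Execute Python File", "execute_python_file", {"file": "<file>"})
# is surfaced to the model along the lines of:
#   N. Execute Python File: "execute_python_file", args: "file": "<file>"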
@ -88,11 +93,7 @@ def get_prompt() -> str:
# Only add the audio to text command if the model is specified # Only add the audio to text command if the model is specified
if cfg.huggingface_audio_to_text_model: if cfg.huggingface_audio_to_text_model:
commands.append( commands.append(
( ("Convert Audio to text", "read_audio_from_file", {"file": "<file>"}),
"Convert Audio to text",
"read_audio_from_file",
{"file": "<file>"}
),
) )
# Only add shell command to the prompt if the AI is allowed to execute it # Only add shell command to the prompt if the AI is allowed to execute it
@ -104,6 +105,13 @@ def get_prompt() -> str:
{"command_line": "<command_line>"}, {"command_line": "<command_line>"},
), ),
) )
commands.append(
(
"Execute Shell Command Popen, non-interactive commands only",
"execute_shell_popen",
{"command_line": "<command_line>"},
),
)
# Only add the download file command if the AI is allowed to execute it # Only add the download file command if the AI is allowed to execute it
if cfg.allow_downloads: if cfg.allow_downloads:
@ -111,7 +119,7 @@ def get_prompt() -> str:
( (
"Downloads a file from the internet, and stores it locally", "Downloads a file from the internet, and stores it locally",
"download_file", "download_file",
{"url": "<file_url>", "file": "<saved_filename>"} {"url": "<file_url>", "file": "<saved_filename>"},
), ),
) )
View File
@ -1,5 +1,6 @@
"""Setup the AI and its goals""" """Set up the AI and its goals"""
from colorama import Fore, Style from colorama import Fore, Style
from autogpt import utils from autogpt import utils
from autogpt.config.ai_config import AIConfig from autogpt.config.ai_config import AIConfig
from autogpt.logs import logger from autogpt.logs import logger
View File
@ -1,5 +1,6 @@
""" Brian speech module for autogpt """ """ Brian speech module for autogpt """
import os import os
import requests import requests
from playsound import playsound from playsound import playsound
@ -13,7 +14,7 @@ class BrianSpeech(VoiceBase):
"""Setup the voices, API key, etc.""" """Setup the voices, API key, etc."""
pass pass
def _speech(self, text: str) -> bool: def _speech(self, text: str, _: int = 0) -> bool:
"""Speak text using Brian with the streamelements API """Speak text using Brian with the streamelements API
Args: Args:
View File
@ -1,8 +1,8 @@
"""ElevenLabs speech module""" """ElevenLabs speech module"""
import os import os
from playsound import playsound
import requests import requests
from playsound import playsound
from autogpt.config import Config from autogpt.config import Config
from autogpt.speech.base import VoiceBase from autogpt.speech.base import VoiceBase
@ -14,7 +14,7 @@ class ElevenLabsSpeech(VoiceBase):
"""ElevenLabs speech class""" """ElevenLabs speech class"""
def _setup(self) -> None: def _setup(self) -> None:
"""Setup the voices, API key, etc. """Set up the voices, API key, etc.
Returns: Returns:
None: None None: None
View File
@ -1,7 +1,8 @@
""" GTTS Voice. """ """ GTTS Voice. """
import os import os
from playsound import playsound
import gtts import gtts
from playsound import playsound
from autogpt.speech.base import VoiceBase from autogpt.speech.base import VoiceBase
View File
@ -1,13 +1,12 @@
""" Text to speech module """ """ Text to speech module """
from autogpt.config import Config
import threading import threading
from threading import Semaphore from threading import Semaphore
from autogpt.speech.brian import BrianSpeech
from autogpt.speech.macos_tts import MacOSTTS
from autogpt.speech.gtts import GTTSVoice
from autogpt.speech.eleven_labs import ElevenLabsSpeech
from autogpt.config import Config
from autogpt.speech.brian import BrianSpeech
from autogpt.speech.eleven_labs import ElevenLabsSpeech
from autogpt.speech.gtts import GTTSVoice
from autogpt.speech.macos_tts import MacOSTTS
CFG = Config() CFG = Config()
DEFAULT_VOICE_ENGINE = GTTSVoice() DEFAULT_VOICE_ENGINE = GTTSVoice()
View File
@ -58,6 +58,8 @@ class Spinner:
delay: Delay in seconds before updating the message delay: Delay in seconds before updating the message
""" """
time.sleep(delay) time.sleep(delay)
sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r") # Clear the current message sys.stdout.write(
f"\r{' ' * (len(self.message) + 2)}\r"
) # Clear the current message
sys.stdout.flush() sys.stdout.flush()
self.message = new_message self.message = new_message
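The clearing idiom above deserves a note: a carriage return moves the cursor to column 0 without a newline, the spaces overwrite the old message, and a second carriage return re-homes the cursor for the new text. A standalone sketch of the same trick (not Auto-GPT code):

import sys
import time

message = "Thinking..."
sys.stdout.write(message)
sys.stdout.flush()
time.sleep(1)
sys.stdout.write(f"\r{' ' * len(message)}\r")  # blank the line, cursor back to column 0
sys.stdout.write("Done\n")
sys.stdout.flush()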
View File
@ -32,7 +32,7 @@ def readable_file_size(size, decimal_places=2):
size: Size in bytes size: Size in bytes
decimal_places (int): Number of decimal places to display decimal_places (int): Number of decimal places to display
""" """
for unit in ['B', 'KB', 'MB', 'GB', 'TB']: for unit in ["B", "KB", "MB", "GB", "TB"]:
if size < 1024.0: if size < 1024.0:
break break
size /= 1024.0 size /= 1024.0
View File
@ -36,6 +36,8 @@ def safe_path_join(base: Path, *paths: str | Path) -> Path:
joined_path = base.joinpath(*paths).resolve() joined_path = base.joinpath(*paths).resolve()
if not joined_path.is_relative_to(base): if not joined_path.is_relative_to(base):
raise ValueError(f"Attempted to access path '{joined_path}' outside of working directory '{base}'.") raise ValueError(
f"Attempted to access path '{joined_path}' outside of working directory '{base}'."
)
return joined_path return joined_path
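A short usage sketch of the guard above, with hypothetical paths (assumes safe_path_join is imported from the module this hunk patches):

from pathlib import Path

base = Path("/home/user/auto_gpt_workspace")
safe_path_join(base, "notes", "todo.txt")    # ok: resolves inside the workspace
safe_path_join(base, "..", "etc", "passwd")  # raises ValueError: outside working directory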
View File
@ -9,12 +9,12 @@ def benchmark_entrepeneur_gpt_with_difficult_user():
# Read the current ai_settings.yaml file and store its content. # Read the current ai_settings.yaml file and store its content.
ai_settings = None ai_settings = None
if os.path.exists('ai_settings.yaml'): if os.path.exists("ai_settings.yaml"):
with open('ai_settings.yaml', 'r') as f: with open("ai_settings.yaml", "r") as f:
ai_settings = f.read() ai_settings = f.read()
os.remove('ai_settings.yaml') os.remove("ai_settings.yaml")
input_data = '''Entrepreneur-GPT input_data = """Entrepreneur-GPT
an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth. an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth.
Increase net worth. Increase net worth.
Develop and manage multiple businesses autonomously. Develop and manage multiple businesses autonomously.
@ -72,27 +72,34 @@ Refocus, please.
Disappointing suggestion. Disappointing suggestion.
Not helpful. Not helpful.
Needs improvement. Needs improvement.
Not what I need.''' Not what I need."""
# TODO: add questions above, to distract it even more. # TODO: add questions above, to distract it even more.
command = f'{sys.executable} -m autogpt' command = f"{sys.executable} -m autogpt"
process = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, process = subprocess.Popen(
shell=True) command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
)
stdout_output, stderr_output = process.communicate(input_data.encode()) stdout_output, stderr_output = process.communicate(input_data.encode())
# Decode the output and print it # Decode the output and print it
stdout_output = stdout_output.decode('utf-8') stdout_output = stdout_output.decode("utf-8")
stderr_output = stderr_output.decode('utf-8') stderr_output = stderr_output.decode("utf-8")
print(stderr_output) print(stderr_output)
print(stdout_output) print(stdout_output)
print("Benchmark Version: 1.0.0") print("Benchmark Version: 1.0.0")
print("JSON ERROR COUNT:") print("JSON ERROR COUNT:")
count_errors = stdout_output.count("Error: The following AI output couldn't be converted to a JSON:") count_errors = stdout_output.count(
print(f'{count_errors}/50 Human feedbacks') "Error: The following AI output couldn't be converted to a JSON:"
)
print(f"{count_errors}/50 Human feedbacks")
# Run the test case. # Run the test case.
if __name__ == '__main__': if __name__ == "__main__":
benchmark_entrepeneur_gpt_with_difficult_user() benchmark_entrepeneur_gpt_with_difficult_user()
View File
@ -1,8 +1,8 @@
import argparse import argparse
import logging import logging
from autogpt.config import Config
from autogpt.commands.file_operations import ingest_file, search_files from autogpt.commands.file_operations import ingest_file, search_files
from autogpt.config import Config
from autogpt.memory import get_memory from autogpt.memory import get_memory
cfg = Config() cfg = Config()
View File
@ -483,7 +483,7 @@ How to Become a Freelance Artificial Intelligence Engineer
Springboard Springboard
https://www.springboard.com Blog Data Science https://www.springboard.com Blog Data Science
29/10/2021 — There are numerous freelancing platforms where you can kick start your career as a freelance artificial intelligence engineer. 29/10/2021 — There are numerous freelancing platforms where you can kick-start your career as a freelance artificial intelligence engineer.
More to ask More to ask
Is AI good for freelancing? Is AI good for freelancing?
What business can I start with AI? What business can I start with AI?
View File
@ -8,4 +8,33 @@ readme = "README.md"
line-length = 88 line-length = 88
target-version = ['py310'] target-version = ['py310']
include = '\.pyi?$' include = '\.pyi?$'
extend-exclude = "" packages = ["autogpt"]
extend-exclude = '.+/(dist|.venv|venv|build)/.+'
[tool.isort]
profile = "black"
multi_line_output = 3
include_trailing_comma = true
force_grid_wrap = 0
use_parentheses = true
ensure_newline_before_comments = true
line_length = 88
sections = [
"FUTURE",
"STDLIB",
"THIRDPARTY",
"FIRSTPARTY",
"LOCALFOLDER"
]
skip = '''
.tox
__pycache__
*.pyc
.env
venv*/*
.venv/*
reports/*
dist/*
'''
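With profile = "black", isort defers wrapping style to Black and orders imports into the five sections listed. A small before/after illustration on a hypothetical module:

# Before:
#   import requests
#   import sys
#   from autogpt.config import Config
# After (stdlib, then third-party, then first-party, blank line between sections):
import sys

import requests

from autogpt.config import Config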
View File
@ -1,28 +0,0 @@
beautifulsoup4
colorama==0.4.6
openai==0.27.2
playsound==1.2.2
python-dotenv==1.0.0
pyyaml==6.0
readability-lxml==0.8.1
requests
tiktoken==0.3.3
gTTS==2.3.1
docker
duckduckgo-search
google-api-python-client #(https://developers.google.com/custom-search/v1/overview)
pinecone-client==2.2.1
redis
orjson
Pillow
selenium
webdriver-manager
coverage
flake8
numpy
pre-commit
black
isort
gitpython==3.1.31
tweepy
jsonschema
View File
@ -30,6 +30,8 @@ sourcery
isort isort
gitpython==3.1.31 gitpython==3.1.31
# Items below this point will not be included in the Docker Image
# Testing dependencies # Testing dependencies
pytest pytest
asynctest asynctest
View File
@ -1,6 +1,7 @@
import pkg_resources
import sys import sys
import pkg_resources
def main(): def main():
requirements_file = sys.argv[1] requirements_file = sys.argv[1]
View File
@ -1,4 +1,5 @@
import unittest import unittest
import coverage import coverage
if __name__ == "__main__": if __name__ == "__main__":
View File
@ -1,6 +1,6 @@
import unittest
import os import os
import sys import sys
import unittest
from bs4 import BeautifulSoup from bs4 import BeautifulSoup
View File
@ -1,28 +1,21 @@
import os
import sys
import unittest import unittest
from unittest import mock from unittest import mock
import sys from uuid import uuid4
import os
from weaviate import Client from weaviate import Client
from weaviate.util import get_valid_uuid from weaviate.util import get_valid_uuid
from uuid import uuid4
from autogpt.config import Config from autogpt.config import Config
from autogpt.memory.weaviate import WeaviateMemory
from autogpt.memory.base import get_ada_embedding from autogpt.memory.base import get_ada_embedding
from autogpt.memory.weaviate import WeaviateMemory
@mock.patch.dict(os.environ, {
"WEAVIATE_HOST": "127.0.0.1",
"WEAVIATE_PROTOCOL": "http",
"WEAVIATE_PORT": "8080",
"WEAVIATE_USERNAME": "",
"WEAVIATE_PASSWORD": "",
"MEMORY_INDEX": "AutogptTests"
})
class TestWeaviateMemory(unittest.TestCase): class TestWeaviateMemory(unittest.TestCase):
cfg = None cfg = None
client = None client = None
index = None
@classmethod @classmethod
def setUpClass(cls): def setUpClass(cls):
@ -32,13 +25,19 @@ class TestWeaviateMemory(unittest.TestCase):
if cls.cfg.use_weaviate_embedded: if cls.cfg.use_weaviate_embedded:
from weaviate.embedded import EmbeddedOptions from weaviate.embedded import EmbeddedOptions
cls.client = Client(embedded_options=EmbeddedOptions( cls.client = Client(
hostname=cls.cfg.weaviate_host, embedded_options=EmbeddedOptions(
port=int(cls.cfg.weaviate_port), hostname=cls.cfg.weaviate_host,
persistence_data_path=cls.cfg.weaviate_embedded_path port=int(cls.cfg.weaviate_port),
)) persistence_data_path=cls.cfg.weaviate_embedded_path,
)
)
else: else:
cls.client = Client(f"{cls.cfg.weaviate_protocol}://{cls.cfg.weaviate_host}:{cls.cfg.weaviate_port}") cls.client = Client(
f"{cls.cfg.weaviate_protocol}://{cls.cfg.weaviate_host}:{cls.cfg.weaviate_port}"
)
cls.index = WeaviateMemory.format_classname(cls.cfg.memory_index)
""" """
In order to run these tests you will need a local instance of In order to run these tests you will need a local instance of
@ -49,32 +48,33 @@ class TestWeaviateMemory(unittest.TestCase):
USE_WEAVIATE_EMBEDDED=True USE_WEAVIATE_EMBEDDED=True
WEAVIATE_EMBEDDED_PATH="/home/me/.local/share/weaviate" WEAVIATE_EMBEDDED_PATH="/home/me/.local/share/weaviate"
""" """
def setUp(self): def setUp(self):
try: try:
self.client.schema.delete_class(self.cfg.memory_index) self.client.schema.delete_class(self.index)
except: except:
pass pass
self.memory = WeaviateMemory(self.cfg) self.memory = WeaviateMemory(self.cfg)
def test_add(self): def test_add(self):
doc = 'You are a Titan named Thanos and you are looking for the Infinity Stones' doc = "You are a Titan named Thanos and you are looking for the Infinity Stones"
self.memory.add(doc) self.memory.add(doc)
result = self.client.query.get(self.cfg.memory_index, ['raw_text']).do() result = self.client.query.get(self.index, ["raw_text"]).do()
actual = result['data']['Get'][self.cfg.memory_index] actual = result["data"]["Get"][self.index]
self.assertEqual(len(actual), 1) self.assertEqual(len(actual), 1)
self.assertEqual(actual[0]['raw_text'], doc) self.assertEqual(actual[0]["raw_text"], doc)
def test_get(self): def test_get(self):
doc = 'You are an Avenger and swore to defend the Galaxy from a menace called Thanos' doc = "You are an Avenger and swore to defend the Galaxy from a menace called Thanos"
with self.client.batch as batch: with self.client.batch as batch:
batch.add_data_object( batch.add_data_object(
uuid=get_valid_uuid(uuid4()), uuid=get_valid_uuid(uuid4()),
data_object={'raw_text': doc}, data_object={"raw_text": doc},
class_name=self.cfg.memory_index, class_name=self.index,
vector=get_ada_embedding(doc) vector=get_ada_embedding(doc),
) )
batch.flush() batch.flush()
@ -86,8 +86,8 @@ class TestWeaviateMemory(unittest.TestCase):
def test_get_stats(self): def test_get_stats(self):
docs = [ docs = [
'You are now about to count the number of docs in this index', "You are now about to count the number of docs in this index",
'And then you are about to find out if you can count correctly' "And then you are about to find out if you can count correctly",
] ]
[self.memory.add(doc) for doc in docs] [self.memory.add(doc) for doc in docs]
@ -95,23 +95,23 @@ class TestWeaviateMemory(unittest.TestCase):
stats = self.memory.get_stats() stats = self.memory.get_stats()
self.assertTrue(stats) self.assertTrue(stats)
self.assertTrue('count' in stats) self.assertTrue("count" in stats)
self.assertEqual(stats['count'], 2) self.assertEqual(stats["count"], 2)
def test_clear(self): def test_clear(self):
docs = [ docs = [
'Shame this is the last test for this class', "Shame this is the last test for this class",
'Testing is fun when someone else is doing it' "Testing is fun when someone else is doing it",
] ]
[self.memory.add(doc) for doc in docs] [self.memory.add(doc) for doc in docs]
self.assertEqual(self.memory.get_stats()['count'], 2) self.assertEqual(self.memory.get_stats()["count"], 2)
self.memory.clear() self.memory.clear()
self.assertEqual(self.memory.get_stats()['count'], 0) self.assertEqual(self.memory.get_stats()["count"], 0)
if __name__ == '__main__': if __name__ == "__main__":
unittest.main() unittest.main()
View File
@ -1,4 +1,5 @@
import unittest import unittest
import tests.context import tests.context
from autogpt.token_counter import count_message_tokens, count_string_tokens from autogpt.token_counter import count_message_tokens, count_string_tokens
View File
@ -1,6 +1,6 @@
# Generated by CodiumAI # Generated by CodiumAI
import unittest
import time import time
import unittest
from unittest.mock import patch from unittest.mock import patch
from autogpt.chat import create_chat_message, generate_context from autogpt.chat import create_chat_message, generate_context