Merge pull request #1393 from 0xArty/feature/pre-commit-formatter
Feature/pre commit formatter
commit 1b3f82e729
@@ -0,0 +1,12 @@
[flake8]
max-line-length = 88
extend-ignore = E203
exclude =
    .tox,
    __pycache__,
    *.pyc,
    .env
    venv/*
    .venv/*
    reports/*
    dist/*
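This flake8 configuration is chosen to coexist with Black: `max-line-length = 88` matches Black's default line length, and `extend-ignore = E203` silences the one pycodestyle check Black routinely violates, since Black may put a space before the colon of a slice with a computed lower bound. A minimal sketch of the conflict (hypothetical names):

```
# Black formats a slice over a computed bound like this:
chunk = items[offset + 1 :]   # pycodestyle E203: whitespace before ':'

# With extend-ignore = E203, flake8 accepts Black's output unchanged.
```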
@@ -1,3 +1,4 @@
## Original ignores
autogpt/keys.py
autogpt/*json
autogpt/node_modules/

@@ -19,10 +20,135 @@ log.txt
log-ingestion.txt
logs

# Coverage reports
.coverage
coverage.xml
htmlcov/
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# For Macs Dev Environs: ignoring .Desktop Services_Store
.DS_Store
# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
plugins/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/
llama-*
vicuna-*
@@ -0,0 +1,10 @@
[settings]
profile = black
multi_line_output = 3
include_trailing_comma = True
force_grid_wrap = 0
use_parentheses = True
ensure_newline_before_comments = True
line_length = 88
skip = venv,env,node_modules,.env,.venv,dist
sections = FUTURE,STDLIB,THIRDPARTY,FIRSTPARTY,LOCALFOLDER
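`profile = black` makes isort agree with Black on line length, trailing commas, and parentheses, and the `sections` order fixes the grouping: stdlib imports first, then third-party, then first-party packages such as `autogpt`. A small before/after sketch:

```
# Before sorting:
from autogpt.config import Config
import os
from colorama import Fore

# After running isort with this configuration:
import os

from colorama import Fore

from autogpt.config import Config
```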
@@ -0,0 +1,33 @@
repos:
  - repo: https://github.com/sourcery-ai/sourcery
    rev: v1.1.0 # Get the latest tag from https://github.com/sourcery-ai/sourcery/tags
    hooks:
      - id: sourcery

  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v0.9.2
    hooks:
      - id: check-added-large-files
        args: [ '--maxkb=500' ]
      - id: check-byte-order-marker
      - id: check-case-conflict
      - id: check-merge-conflict
      - id: check-symlinks
      - id: debug-statements

  - repo: local
    hooks:
      - id: isort
        name: isort-local
        entry: isort
        language: python
        types: [ python ]
        exclude: .+/(dist|.venv|venv|build)/.+
        pass_filenames: true
      - id: black
        name: black-local
        entry: black
        language: python
        types: [ python ]
        exclude: .+/(dist|.venv|venv|build)/.+
        pass_filenames: true
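After a contributor runs `pre-commit install`, every hook above executes on each commit, and the commit is rejected if any hook fails. For instance, `debug-statements` blocks files containing leftover debugger calls; a sketch of code it would reject (hypothetical function):

```
def compute_total(items):
    breakpoint()  # debug-statements fails the commit on this line
    return sum(items)
```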
@@ -0,0 +1,71 @@
# 🪄 This is your project's Sourcery configuration file.

# You can use it to get Sourcery working in the way you want, such as
# ignoring specific refactorings, skipping directories in your project,
# or writing custom rules.

# 📚 For a complete reference to this file, see the documentation at
# https://docs.sourcery.ai/Configuration/Project-Settings/

# This file was auto-generated by Sourcery on 2023-02-25 at 21:07.

version: '1' # The schema version of this config file

ignore: # A list of paths or files which Sourcery will ignore.
  - .git
  - venv
  - .venv
  - build
  - dist
  - env
  - .env
  - .tox

rule_settings:
  enable:
    - default
    - gpsg
  disable: [] # A list of rule IDs Sourcery will never suggest.
  rule_types:
    - refactoring
    - suggestion
    - comment
  python_version: '3.9' # A string specifying the lowest Python version your project supports. Sourcery will not suggest refactorings requiring a higher Python version.

# rules: # A list of custom rules Sourcery will include in its analysis.
#   - id: no-print-statements
#     description: Do not use print statements in the test directory.
#     pattern: print(...)
#     language: python
#     replacement:
#     condition:
#     explanation:
#     paths:
#       include:
#         - test
#       exclude:
#         - conftest.py
#     tests: []
#     tags: []

# rule_tags: {} # Additional rule tags.

# metrics:
#   quality_threshold: 25.0

# github:
#   labels: []
#   ignore_labels:
#     - sourcery-ignore
#   request_review: author
#   sourcery_branch: sourcery/{base_branch}

# clone_detection:
#   min_lines: 3
#   min_duplicates: 2
#   identical_clones_only: false

# proxy:
#   url:
#   ssl_certs_file:
#   no_ssl_verify: false
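The commented-out `rules` block sketches a custom Sourcery rule: if enabled, the `print(...)` pattern would match any print call in files under `test` (except `conftest.py`). A hypothetical test it would flag on the first line but not the second:

```
def test_totals():
    print("debugging totals")  # matched by the pattern print(...)
    assert compute_total([1, 2]) == 3  # not a print call, ignored
```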
@@ -8,41 +8,49 @@ To contribute to this GitHub project, you can follow these steps:
```
git clone https://github.com/<YOUR-GITHUB-USERNAME>/Auto-GPT
```
3. Create a new branch for your changes using the following command:
3. Install the project requirements
```
pip install -r requirements.txt
```
4. Install pre-commit hooks
```
pre-commit install
```
5. Create a new branch for your changes using the following command:

```
git checkout -b "branch-name"
```
4. Make your changes to the code or documentation.
6. Make your changes to the code or documentation.
- Example: Improve User Interface or Add Documentation.


5. Add the changes to the staging area using the following command:
7. Add the changes to the staging area using the following command:
```
git add .
```

6. Commit the changes with a meaningful commit message using the following command:
8. Commit the changes with a meaningful commit message using the following command:
```
git commit -m "your commit message"
```
7. Push the changes to your forked repository using the following command:
9. Push the changes to your forked repository using the following command:
```
git push origin branch-name
```
8. Go to the GitHub website and navigate to your forked repository.
10. Go to the GitHub website and navigate to your forked repository.

9. Click the "New pull request" button.
11. Click the "New pull request" button.

10. Select the branch you just pushed to and the branch you want to merge into on the original repository.
12. Select the branch you just pushed to and the branch you want to merge into on the original repository.

11. Add a description of your changes and click the "Create pull request" button.
13. Add a description of your changes and click the "Create pull request" button.

12. Wait for the project maintainer to review your changes and provide feedback.
14. Wait for the project maintainer to review your changes and provide feedback.

13. Make any necessary changes based on feedback and repeat steps 5-12 until your changes are accepted and merged into the main project.
15. Make any necessary changes based on feedback and repeat steps 5-12 until your changes are accepted and merged into the main project.

14. Once your changes are merged, you can update your forked repository and local copy of the repository with the following commands:
16. Once your changes are merged, you can update your forked repository and local copy of the repository with the following commands:

```
git fetch upstream
```
@@ -1,22 +1,23 @@
import argparse
import json
import logging
import random
from autogpt import commands as cmd
from autogpt import utils
from autogpt.memory import get_memory, get_supported_memory_backends
from autogpt import chat
from colorama import Fore, Style
from autogpt.spinner import Spinner
import time
from autogpt import speak
import traceback

import yaml
from colorama import Fore, Style

from autogpt import chat
from autogpt import commands as cmd
from autogpt import speak, utils
from autogpt.ai_config import AIConfig
from autogpt.config import Config
from autogpt.json_parser import fix_and_parse_json
from autogpt.ai_config import AIConfig
import traceback
import yaml
import argparse
from autogpt.logger import logger
import logging
from autogpt.memory import get_memory, get_supported_memory_backends
from autogpt.prompt import get_prompt
from autogpt.spinner import Spinner

cfg = Config()


@@ -25,8 +26,8 @@ def check_openai_api_key():
    """Check if the OpenAI API key is set in config.py or as an environment variable."""
    if not cfg.openai_api_key:
        print(
            Fore.RED +
            "Please set your OpenAI API key in .env or as an environment variable."
            Fore.RED
            + "Please set your OpenAI API key in .env or as an environment variable."
        )
        print("You can get your key from https://beta.openai.com/account/api-keys")
        exit(1)

@@ -34,19 +35,24 @@ def check_openai_api_key():

def attempt_to_fix_json_by_finding_outermost_brackets(json_string):
    if cfg.speak_mode and cfg.debug_mode:
        speak.say_text("I have received an invalid JSON response from the OpenAI API. Trying to fix it now.")
        speak.say_text(
            "I have received an invalid JSON response from the OpenAI API. Trying to fix it now."
        )
    logger.typewriter_log("Attempting to fix JSON by finding outermost brackets\n")

    try:
        # Use regex to search for JSON objects
        import regex

        json_pattern = regex.compile(r"\{(?:[^{}]|(?R))*\}")
        json_match = json_pattern.search(json_string)

        if json_match:
            # Extract the valid JSON object from the string
            json_string = json_match.group(0)
            logger.typewriter_log(title="Apparently json was fixed.", title_color=Fore.GREEN)
            logger.typewriter_log(
                title="Apparently json was fixed.", title_color=Fore.GREEN
            )
            if cfg.speak_mode and cfg.debug_mode:
                speak.say_text("Apparently json was fixed.")
        else:
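The pattern `r"\{(?:[^{}]|(?R))*\}"` depends on the third-party `regex` package; the stdlib `re` module has no `(?R)` construct. `(?R)` recursively re-enters the entire pattern, so the expression matches a brace pair whose body is any mix of non-brace characters and further balanced brace pairs, which is what lets it pull a nested JSON object out of surrounding chatter. A self-contained sketch of the same extraction step:

```
import regex  # pip install regex; stdlib re cannot express (?R)

noisy = 'Sure, here you go: {"thoughts": {"text": "hi"}} Hope that helps!'
json_pattern = regex.compile(r"\{(?:[^{}]|(?R))*\}")
match = json_pattern.search(noisy)
if match:
    print(match.group(0))  # {"thoughts": {"text": "hi"}}
```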
@@ -71,7 +77,9 @@ def print_assistant_thoughts(assistant_reply):
        assistant_reply_json = fix_and_parse_json(assistant_reply)
    except json.JSONDecodeError as e:
        logger.error("Error: Invalid JSON in assistant thoughts\n", assistant_reply)
        assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply)
        assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets(
            assistant_reply
        )
        assistant_reply_json = fix_and_parse_json(assistant_reply_json)

    # Check if assistant_reply_json is a string and attempt to parse it into a JSON object

@@ -80,7 +88,11 @@ def print_assistant_thoughts(assistant_reply):
            assistant_reply_json = json.loads(assistant_reply_json)
        except json.JSONDecodeError as e:
            logger.error("Error: Invalid JSON\n", assistant_reply)
            assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply_json)
            assistant_reply_json = (
                attempt_to_fix_json_by_finding_outermost_brackets(
                    assistant_reply_json
                )
            )

    assistant_thoughts_reasoning = None
    assistant_thoughts_plan = None

@@ -95,7 +107,9 @@ def print_assistant_thoughts(assistant_reply):
        assistant_thoughts_criticism = assistant_thoughts.get("criticism")
        assistant_thoughts_speak = assistant_thoughts.get("speak")

        logger.typewriter_log(f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, assistant_thoughts_text)
        logger.typewriter_log(
            f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, assistant_thoughts_text
        )
        logger.typewriter_log("REASONING:", Fore.YELLOW, assistant_thoughts_reasoning)

        if assistant_thoughts_plan:

@@ -107,7 +121,7 @@ def print_assistant_thoughts(assistant_reply):
                assistant_thoughts_plan = str(assistant_thoughts_plan)

            # Split the input_string using the newline character and dashes
            lines = assistant_thoughts_plan.split('\n')
            lines = assistant_thoughts_plan.split("\n")
            for line in lines:
                line = line.lstrip("- ")
                logger.typewriter_log("- ", Fore.GREEN, line.strip())

@@ -121,7 +135,9 @@ def print_assistant_thoughts(assistant_reply):
    except json.decoder.JSONDecodeError as e:
        logger.error("Error: Invalid JSON\n", assistant_reply)
        if cfg.speak_mode:
            speak.say_text("I have received an invalid JSON response from the OpenAI API. I cannot ignore this response.")
            speak.say_text(
                "I have received an invalid JSON response from the OpenAI API. I cannot ignore this response."
            )

    # All other errors, return "Error: + error message"
    except Exception as e:

@@ -141,12 +157,15 @@ def construct_prompt():
            f"Welcome back! ",
            Fore.GREEN,
            f"Would you like me to return to being {config.ai_name}?",
            speak_text=True)
        should_continue = utils.clean_input(f"""Continue with the last settings?
            speak_text=True,
        )
        should_continue = utils.clean_input(
            f"""Continue with the last settings?
Name: {config.ai_name}
Role: {config.ai_role}
Goals: {config.ai_goals}
Continue (y/n): """)
Continue (y/n): """
        )
        if should_continue.lower() == "n":
            config = AIConfig()

@@ -170,28 +189,27 @@ def prompt_user():
        "Welcome to Auto-GPT! ",
        Fore.GREEN,
        "Enter the name of your AI and its role below. Entering nothing will load defaults.",
        speak_text=True)
        speak_text=True,
    )

    # Get AI Name from User
    logger.typewriter_log(
        "Name your AI: ",
        Fore.GREEN,
        "For example, 'Entrepreneur-GPT'")
        "Name your AI: ", Fore.GREEN, "For example, 'Entrepreneur-GPT'"
    )
    ai_name = utils.clean_input("AI Name: ")
    if ai_name == "":
        ai_name = "Entrepreneur-GPT"

    logger.typewriter_log(
        f"{ai_name} here!",
        Fore.LIGHTBLUE_EX,
        "I am at your service.",
        speak_text=True)
        f"{ai_name} here!", Fore.LIGHTBLUE_EX, "I am at your service.", speak_text=True
    )

    # Get AI Role from User
    logger.typewriter_log(
        "Describe your AI's role: ",
        Fore.GREEN,
        "For example, 'an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth.'")
        "For example, 'an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth.'",
    )
    ai_role = utils.clean_input(f"{ai_name} is: ")
    if ai_role == "":
        ai_role = "an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth."

@@ -200,7 +218,8 @@ def prompt_user():
    logger.typewriter_log(
        "Enter up to 5 goals for your AI: ",
        Fore.GREEN,
        "For example: \nIncrease net worth, Grow Twitter Account, Develop and manage multiple businesses autonomously'")
        "For example: \nIncrease net worth, Grow Twitter Account, Develop and manage multiple businesses autonomously'",
    )
    print("Enter nothing to load defaults, enter nothing when finished.", flush=True)
    ai_goals = []
    for i in range(5):

@@ -209,8 +228,11 @@ def prompt_user():
            break
        ai_goals.append(ai_goal)
    if len(ai_goals) == 0:
        ai_goals = ["Increase net worth", "Grow Twitter Account",
                    "Develop and manage multiple businesses autonomously"]
        ai_goals = [
            "Increase net worth",
            "Grow Twitter Account",
            "Develop and manage multiple businesses autonomously",
        ]

    config = AIConfig(ai_name, ai_role, ai_goals)
    return config

@@ -223,16 +245,42 @@ def parse_arguments():
    cfg.set_continuous_mode(False)
    cfg.set_speak_mode(False)

    parser = argparse.ArgumentParser(description='Process arguments.')
    parser.add_argument('--continuous', '-c', action='store_true', help='Enable Continuous Mode')
    parser.add_argument('--continuous-limit', '-l', type=int, dest="continuous_limit", help='Defines the number of times to run in continuous mode')
    parser.add_argument('--speak', action='store_true', help='Enable Speak Mode')
    parser.add_argument('--debug', action='store_true', help='Enable Debug Mode')
    parser.add_argument('--gpt3only', action='store_true', help='Enable GPT3.5 Only Mode')
    parser.add_argument('--gpt4only', action='store_true', help='Enable GPT4 Only Mode')
    parser.add_argument('--use-memory', '-m', dest="memory_type", help='Defines which Memory backend to use')
    parser.add_argument('--skip-reprompt', '-y', dest='skip_reprompt', action='store_true', help='Skips the re-prompting messages at the beginning of the script')
    parser.add_argument('--ai-settings', '-C', dest='ai_settings_file', help="Specifies which ai_settings.yaml file to use, will also automatically skip the re-prompt.")
    parser = argparse.ArgumentParser(description="Process arguments.")
    parser.add_argument(
        "--continuous", "-c", action="store_true", help="Enable Continuous Mode"
    )
    parser.add_argument(
        "--continuous-limit",
        "-l",
        type=int,
        dest="continuous_limit",
        help="Defines the number of times to run in continuous mode",
    )
    parser.add_argument("--speak", action="store_true", help="Enable Speak Mode")
    parser.add_argument("--debug", action="store_true", help="Enable Debug Mode")
    parser.add_argument(
        "--gpt3only", action="store_true", help="Enable GPT3.5 Only Mode"
    )
    parser.add_argument("--gpt4only", action="store_true", help="Enable GPT4 Only Mode")
    parser.add_argument(
        "--use-memory",
        "-m",
        dest="memory_type",
        help="Defines which Memory backend to use",
    )
    parser.add_argument(
        "--skip-reprompt",
        "-y",
        dest="skip_reprompt",
        action="store_true",
        help="Skips the re-prompting messages at the beginning of the script",
    )
    parser.add_argument(
        "--ai-settings",
        "-C",
        dest="ai_settings_file",
        help="Specifies which ai_settings.yaml file to use, will also automatically skip the re-prompt.",
    )
    args = parser.parse_args()

    if args.debug:
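The argparse rewrite above is mechanical Black output: a call that fits in 88 columns stays on one line, while a longer call is exploded to one argument per line with a trailing comma. Two lines taken from this hunk illustrate both cases:

```
# Short enough for one line, so Black leaves it alone:
parser.add_argument("--speak", action="store_true", help="Enable Speak Mode")

# Too long for 88 columns, so Black wraps it and adds a trailing comma,
# which keeps future one-argument changes to single-line diffs:
parser.add_argument(
    "--skip-reprompt",
    "-y",
    dest="skip_reprompt",
    action="store_true",
    help="Skips the re-prompting messages at the beginning of the script",
)
```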
@@ -244,14 +292,14 @@ def parse_arguments():
        logger.typewriter_log(
            "WARNING: ",
            Fore.RED,
            "Continuous mode is not recommended. It is potentially dangerous and may cause your AI to run forever or carry out actions you would not usually authorise. Use at your own risk.")
            "Continuous mode is not recommended. It is potentially dangerous and may cause your AI to run forever or carry out actions you would not usually authorise. Use at your own risk.",
        )
        cfg.set_continuous_mode(True)

        if args.continuous_limit:
            logger.typewriter_log(
                "Continuous Limit: ",
                Fore.GREEN,
                f"{args.continuous_limit}")
                "Continuous Limit: ", Fore.GREEN, f"{args.continuous_limit}"
            )
            cfg.set_continuous_limit(args.continuous_limit)

    # Check if continuous limit is used without continuous mode

@@ -274,7 +322,11 @@ def parse_arguments():
        supported_memory = get_supported_memory_backends()
        chosen = args.memory_type
        if not chosen in supported_memory:
            logger.typewriter_log("ONLY THE FOLLOWING MEMORY BACKENDS ARE SUPPORTED: ", Fore.RED, f'{supported_memory}')
            logger.typewriter_log(
                "ONLY THE FOLLOWING MEMORY BACKENDS ARE SUPPORTED: ",
                Fore.RED,
                f"{supported_memory}",
            )
            logger.typewriter_log(f"Defaulting to: ", Fore.YELLOW, cfg.memory_backend)
        else:
            cfg.memory_backend = chosen

@@ -316,14 +368,14 @@ def main():
    # Initialize memory and make sure it is empty.
    # this is particularly important for indexing and referencing pinecone memory
    memory = get_memory(cfg, init=True)
    print('Using memory of type: ' + memory.__class__.__name__)
    print("Using memory of type: " + memory.__class__.__name__)
    agent = Agent(
        ai_name=ai_name,
        memory=memory,
        full_message_history=full_message_history,
        next_action_count=next_action_count,
        prompt=prompt,
        user_input=user_input
        user_input=user_input,
    )
    agent.start_interaction_loop()

@@ -340,13 +392,16 @@ class Agent:
        user_input: The user input.

    """
    def __init__(self,

    def __init__(
        self,
        ai_name,
        memory,
        full_message_history,
        next_action_count,
        prompt,
        user_input):
        user_input,
    ):
        self.ai_name = ai_name
        self.memory = memory
        self.full_message_history = full_message_history

@@ -360,8 +415,14 @@ class Agent:
        while True:
            # Discontinue if continuous limit is reached
            loop_count += 1
            if cfg.continuous_mode and cfg.continuous_limit > 0 and loop_count > cfg.continuous_limit:
                logger.typewriter_log("Continuous Limit Reached: ", Fore.YELLOW, f"{cfg.continuous_limit}")
            if (
                cfg.continuous_mode
                and cfg.continuous_limit > 0
                and loop_count > cfg.continuous_limit
            ):
                logger.typewriter_log(
                    "Continuous Limit Reached: ", Fore.YELLOW, f"{cfg.continuous_limit}"
                )
                break

            # Send message to AI, get response

@@ -371,7 +432,8 @@ class Agent:
                self.user_input,
                self.full_message_history,
                self.memory,
                cfg.fast_token_limit)  # TODO: This hardcodes the model to use GPT3.5. Make this an argument
                cfg.fast_token_limit,
            )  # TODO: This hardcodes the model to use GPT3.5. Make this an argument

            # Print Assistant thoughts
            print_assistant_thoughts(assistant_reply)

@@ -379,7 +441,8 @@ class Agent:
            # Get command name and arguments
            try:
                command_name, arguments = cmd.get_command(
                    attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply))
                    attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply)
                )
                if cfg.speak_mode:
                    speak.say_text(f"I want to execute {command_name}")
            except Exception as e:

@@ -393,21 +456,29 @@ class Agent:
                logger.typewriter_log(
                    "NEXT ACTION: ",
                    Fore.CYAN,
                    f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}")
                    f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
                )
                print(
                    f"Enter 'y' to authorise command, 'y -N' to run N continuous commands, 'n' to exit program, or enter feedback for {self.ai_name}...",
                    flush=True)
                    flush=True,
                )
                while True:
                    console_input = utils.clean_input(Fore.MAGENTA + "Input:" + Style.RESET_ALL)
                    console_input = utils.clean_input(
                        Fore.MAGENTA + "Input:" + Style.RESET_ALL
                    )
                    if console_input.lower().rstrip() == "y":
                        self.user_input = "GENERATE NEXT COMMAND JSON"
                        break
                    elif console_input.lower().startswith("y -"):
                        try:
                            self.next_action_count = abs(int(console_input.split(" ")[1]))
                            self.next_action_count = abs(
                                int(console_input.split(" ")[1])
                            )
                            self.user_input = "GENERATE NEXT COMMAND JSON"
                        except ValueError:
                            print("Invalid input format. Please enter 'y -n' where n is the number of continuous tasks.")
                            print(
                                "Invalid input format. Please enter 'y -n' where n is the number of continuous tasks."
                            )
                            continue
                        break
                    elif console_input.lower() == "n":

@@ -422,7 +493,8 @@ class Agent:
                logger.typewriter_log(
                    "-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=",
                    Fore.MAGENTA,
                    "")
                    "",
                )
            elif self.user_input == "EXIT":
                print("Exiting...", flush=True)
                break

@@ -431,11 +503,14 @@ class Agent:
                logger.typewriter_log(
                    "NEXT ACTION: ",
                    Fore.CYAN,
                    f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}")
                    f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
                )

            # Execute command
            if command_name is not None and command_name.lower().startswith("error"):
                result = f"Command {command_name} threw the following error: " + arguments
                result = (
                    f"Command {command_name} threw the following error: " + arguments
                )
            elif command_name == "human_feedback":
                result = f"Human feedback: {self.user_input}"
            else:

@@ -443,22 +518,28 @@ class Agent:
                if self.next_action_count > 0:
                    self.next_action_count -= 1

            memory_to_add = f"Assistant Reply: {assistant_reply} " \
                            f"\nResult: {result} " \
            memory_to_add = (
                f"Assistant Reply: {assistant_reply} "
                f"\nResult: {result} "
                f"\nHuman Feedback: {self.user_input} "
            )

            self.memory.add(memory_to_add)

            # Check if there's a result from the command append it to the message
            # history
            if result is not None:
                self.full_message_history.append(chat.create_chat_message("system", result))
                self.full_message_history.append(
                    chat.create_chat_message("system", result)
                )
                logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result)
            else:
                self.full_message_history.append(
                    chat.create_chat_message(
                        "system", "Unable to execute command"))
                logger.typewriter_log("SYSTEM: ", Fore.YELLOW, "Unable to execute command")
                    chat.create_chat_message("system", "Unable to execute command")
                )
                logger.typewriter_log(
                    "SYSTEM: ", Fore.YELLOW, "Unable to execute command"
                )


if __name__ == "__main__":
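The `memory_to_add` change above swaps backslash line continuations for adjacent f-string literals inside parentheses. Python concatenates neighbouring string literals at compile time, so both spellings build the identical string; a quick demonstration with placeholder values:

```
reply, result, feedback = "ok", "done", "none"

new_style = (
    f"Assistant Reply: {reply} "
    f"\nResult: {result} "
    f"\nHuman Feedback: {feedback} "
)
old_style = f"Assistant Reply: {reply} " f"\nResult: {result} " f"\nHuman Feedback: {feedback} "
assert new_style == old_style  # same string, no backslashes needed
```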
autogpt/agent.py (102 changed lines)
@@ -1,4 +1,3 @@
import autogpt.commands as cmd
import json
import traceback
from tkinter.ttk import Style

@@ -6,9 +5,10 @@ from tkinter.ttk import Style
from colorama import Fore

import autogpt.chat
import autogpt.commands as cmd
import autogpt.speak
from autogpt.config import Config
from autogpt.logger import logger
import autogpt.speak
from autogpt.spinner import Spinner



@@ -24,13 +24,16 @@ class Agent:
        user_input: The user input.

    """
    def __init__(self,

    def __init__(
        self,
        ai_name,
        memory,
        full_message_history,
        next_action_count,
        prompt,
        user_input):
        user_input,
    ):
        self.ai_name = ai_name
        self.memory = memory
        self.full_message_history = full_message_history

@@ -45,8 +48,14 @@ class Agent:
        while True:
            # Discontinue if continuous limit is reached
            loop_count += 1
            if cfg.continuous_mode and cfg.continuous_limit > 0 and loop_count > cfg.continuous_limit:
                logger.typewriter_log("Continuous Limit Reached: ", Fore.YELLOW, f"{cfg.continuous_limit}")
            if (
                cfg.continuous_mode
                and cfg.continuous_limit > 0
                and loop_count > cfg.continuous_limit
            ):
                logger.typewriter_log(
                    "Continuous Limit Reached: ", Fore.YELLOW, f"{cfg.continuous_limit}"
                )
                break

            # Send message to AI, get response

@@ -56,7 +65,8 @@ class Agent:
                self.user_input,
                self.full_message_history,
                self.memory,
                cfg.fast_token_limit)  # TODO: This hardcodes the model to use GPT3.5. Make this an argument
                cfg.fast_token_limit,
            )  # TODO: This hardcodes the model to use GPT3.5. Make this an argument

            # Print Assistant thoughts
            print_assistant_thoughts(assistant_reply)

@@ -64,7 +74,8 @@ class Agent:
            # Get command name and arguments
            try:
                command_name, arguments = cmd.get_command(
                    attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply))
                    attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply)
                )
                if cfg.speak_mode:
                    speak.say_text(f"I want to execute {command_name}")
            except Exception as e:

@@ -78,21 +89,29 @@ class Agent:
                logger.typewriter_log(
                    "NEXT ACTION: ",
                    Fore.CYAN,
                    f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}")
                    f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
                )
                print(
                    f"Enter 'y' to authorise command, 'y -N' to run N continuous commands, 'n' to exit program, or enter feedback for {self.ai_name}...",
                    flush=True)
                    flush=True,
                )
                while True:
                    console_input = utils.clean_input(Fore.MAGENTA + "Input:" + Style.RESET_ALL)
                    console_input = utils.clean_input(
                        Fore.MAGENTA + "Input:" + Style.RESET_ALL
                    )
                    if console_input.lower().rstrip() == "y":
                        self.user_input = "GENERATE NEXT COMMAND JSON"
                        break
                    elif console_input.lower().startswith("y -"):
                        try:
                            self.next_action_count = abs(int(console_input.split(" ")[1]))
                            self.next_action_count = abs(
                                int(console_input.split(" ")[1])
                            )
                            self.user_input = "GENERATE NEXT COMMAND JSON"
                        except ValueError:
                            print("Invalid input format. Please enter 'y -n' where n is the number of continuous tasks.")
                            print(
                                "Invalid input format. Please enter 'y -n' where n is the number of continuous tasks."
                            )
                            continue
                        break
                    elif console_input.lower() == "n":

@@ -107,7 +126,8 @@ class Agent:
                logger.typewriter_log(
                    "-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=",
                    Fore.MAGENTA,
                    "")
                    "",
                )
            elif self.user_input == "EXIT":
                print("Exiting...", flush=True)
                break

@@ -116,11 +136,14 @@ class Agent:
                logger.typewriter_log(
                    "NEXT ACTION: ",
                    Fore.CYAN,
                    f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}")
                    f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
                )

            # Execute command
            if command_name is not None and command_name.lower().startswith("error"):
                result = f"Command {command_name} threw the following error: " + arguments
                result = (
                    f"Command {command_name} threw the following error: " + arguments
                )
            elif command_name == "human_feedback":
                result = f"Human feedback: {self.user_input}"
            else:

@@ -128,40 +151,51 @@ class Agent:
                if self.next_action_count > 0:
                    self.next_action_count -= 1

            memory_to_add = f"Assistant Reply: {assistant_reply} " \
                            f"\nResult: {result} " \
            memory_to_add = (
                f"Assistant Reply: {assistant_reply} "
                f"\nResult: {result} "
                f"\nHuman Feedback: {self.user_input} "
            )

            self.memory.add(memory_to_add)

            # Check if there's a result from the command append it to the message
            # history
            if result is not None:
                self.full_message_history.append(chat.create_chat_message("system", result))
                self.full_message_history.append(
                    chat.create_chat_message("system", result)
                )
                logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result)
            else:
                self.full_message_history.append(
                    chat.create_chat_message(
                        "system", "Unable to execute command"))
                logger.typewriter_log("SYSTEM: ", Fore.YELLOW, "Unable to execute command")
                    chat.create_chat_message("system", "Unable to execute command")
                )
                logger.typewriter_log(
                    "SYSTEM: ", Fore.YELLOW, "Unable to execute command"
                )


def attempt_to_fix_json_by_finding_outermost_brackets(json_string):
    cfg = Config()
    if cfg.speak_mode and cfg.debug_mode:
        speak.say_text("I have received an invalid JSON response from the OpenAI API. Trying to fix it now.")
        speak.say_text(
            "I have received an invalid JSON response from the OpenAI API. Trying to fix it now."
        )
    logger.typewriter_log("Attempting to fix JSON by finding outermost brackets\n")

    try:
        # Use regex to search for JSON objects
        import regex

        json_pattern = regex.compile(r"\{(?:[^{}]|(?R))*\}")
        json_match = json_pattern.search(json_string)

        if json_match:
            # Extract the valid JSON object from the string
            json_string = json_match.group(0)
            logger.typewriter_log(title="Apparently json was fixed.", title_color=Fore.GREEN)
            logger.typewriter_log(
                title="Apparently json was fixed.", title_color=Fore.GREEN
            )
            if cfg.speak_mode and cfg.debug_mode:
                speak.say_text("Apparently json was fixed.")
        else:

@@ -187,7 +221,9 @@ def print_assistant_thoughts(assistant_reply):
        assistant_reply_json = fix_and_parse_json(assistant_reply)
    except json.JSONDecodeError as e:
        logger.error("Error: Invalid JSON in assistant thoughts\n", assistant_reply)
        assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply)
        assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets(
            assistant_reply
        )
        assistant_reply_json = fix_and_parse_json(assistant_reply_json)

    # Check if assistant_reply_json is a string and attempt to parse it into a JSON object

@@ -196,7 +232,11 @@ def print_assistant_thoughts(assistant_reply):
            assistant_reply_json = json.loads(assistant_reply_json)
        except json.JSONDecodeError as e:
            logger.error("Error: Invalid JSON\n", assistant_reply)
            assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply_json)
            assistant_reply_json = (
                attempt_to_fix_json_by_finding_outermost_brackets(
                    assistant_reply_json
                )
            )

    assistant_thoughts_reasoning = None
    assistant_thoughts_plan = None

@@ -211,7 +251,9 @@ def print_assistant_thoughts(assistant_reply):
        assistant_thoughts_criticism = assistant_thoughts.get("criticism")
        assistant_thoughts_speak = assistant_thoughts.get("speak")

        logger.typewriter_log(f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, assistant_thoughts_text)
        logger.typewriter_log(
            f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, assistant_thoughts_text
        )
        logger.typewriter_log("REASONING:", Fore.YELLOW, assistant_thoughts_reasoning)

        if assistant_thoughts_plan:

@@ -223,7 +265,7 @@ def print_assistant_thoughts(assistant_reply):
                assistant_thoughts_plan = str(assistant_thoughts_plan)

            # Split the input_string using the newline character and dashes
            lines = assistant_thoughts_plan.split('\n')
            lines = assistant_thoughts_plan.split("\n")
            for line in lines:
                line = line.lstrip("- ")
                logger.typewriter_log("- ", Fore.GREEN, line.strip())

@@ -237,7 +279,9 @@ def print_assistant_thoughts(assistant_reply):
    except json.decoder.JSONDecodeError as e:
        logger.error("Error: Invalid JSON\n", assistant_reply)
        if cfg.speak_mode:
            speak.say_text("I have received an invalid JSON response from the OpenAI API. I cannot ignore this response.")
            speak.say_text(
                "I have received an invalid JSON response from the OpenAI API. I cannot ignore this response."
            )

    # All other errors, return "Error: + error message"
    except Exception as e:
@@ -12,7 +12,9 @@ def create_agent(task, prompt, model):
    global next_key
    global agents

    messages = [{"role": "user", "content": prompt}, ]
    messages = [
        {"role": "user", "content": prompt},
    ]

    # Start GPT instance
    agent_reply = create_chat_completion(
@@ -1,5 +1,7 @@
import yaml
import os

import yaml

from autogpt.prompt import get_prompt


@@ -13,7 +15,9 @@ class AIConfig:
        ai_goals (list): The list of objectives the AI is supposed to complete.
    """

    def __init__(self, ai_name: str="", ai_role: str="", ai_goals: list=[]) -> None:
    def __init__(
        self, ai_name: str = "", ai_role: str = "", ai_goals: list = []
    ) -> None:
        """
        Initialize a class instance
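A side note on the signature Black just reformatted: `ai_goals: list = []` is a mutable default, which Python evaluates once at definition time, so all `AIConfig()` instances created without goals share one list object. The diff keeps the original behaviour; the usual sentinel fix would look roughly like this (a sketch, not what the PR does):

```
from typing import Optional


class AIConfig:
    def __init__(
        self, ai_name: str = "", ai_role: str = "", ai_goals: Optional[list] = None
    ) -> None:
        self.ai_name = ai_name
        self.ai_role = ai_role
        # A fresh list per instance instead of one shared default object.
        self.ai_goals = ai_goals if ai_goals is not None else []
```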
@@ -30,7 +34,7 @@ class AIConfig:
        self.ai_goals = ai_goals

    # Soon this will go in a folder where it remembers more stuff about the run(s)
    SAVE_FILE = os.path.join(os.path.dirname(__file__), '..', 'ai_settings.yaml')
    SAVE_FILE = os.path.join(os.path.dirname(__file__), "..", "ai_settings.yaml")

    @classmethod
    def load(cls: object, config_file: str = SAVE_FILE) -> object:

@@ -47,7 +51,7 @@ class AIConfig:
        """

        try:
            with open(config_file, encoding='utf-8') as file:
            with open(config_file, encoding="utf-8") as file:
                config_params = yaml.load(file, Loader=yaml.FullLoader)
        except FileNotFoundError:
            config_params = {}

@@ -69,8 +73,12 @@ class AIConfig:
            None
        """

        config = {"ai_name": self.ai_name, "ai_role": self.ai_role, "ai_goals": self.ai_goals}
        with open(config_file, "w", encoding='utf-8') as file:
        config = {
            "ai_name": self.ai_name,
            "ai_role": self.ai_role,
            "ai_goals": self.ai_goals,
        }
        with open(config_file, "w", encoding="utf-8") as file:
            yaml.dump(config, file, allow_unicode=True)

    def construct_full_prompt(self) -> str:

@@ -87,7 +95,9 @@ class AIConfig:
        prompt_start = """Your decisions must always be made independently without seeking user assistance. Play to your strengths as an LLM and pursue simple strategies with no legal complications."""

        # Construct full prompt
        full_prompt = f"You are {self.ai_name}, {self.ai_role}\n{prompt_start}\n\nGOALS:\n\n"
        full_prompt = (
            f"You are {self.ai_name}, {self.ai_role}\n{prompt_start}\n\nGOALS:\n\n"
        )
        for i, goal in enumerate(self.ai_goals):
            full_prompt += f"{i+1}. {goal}\n"
@@ -1,7 +1,9 @@
from typing import List
import json
from autogpt.config import Config
from typing import List

from autogpt.call_ai_function import call_ai_function
from autogpt.config import Config

cfg = Config()
@@ -1,15 +1,17 @@
from urllib.parse import urljoin, urlparse

import requests
from bs4 import BeautifulSoup
from autogpt.memory import get_memory

from autogpt.config import Config
from autogpt.llm_utils import create_chat_completion
from urllib.parse import urlparse, urljoin
from autogpt.memory import get_memory

cfg = Config()
memory = get_memory(cfg)

session = requests.Session()
session.headers.update({'User-Agent': cfg.user_agent})
session.headers.update({"User-Agent": cfg.user_agent})


# Function to check if the URL is valid

@@ -28,7 +30,12 @@ def sanitize_url(url):

# Define and check for local file address prefixes
def check_local_file_access(url):
    local_prefixes = ['file:///', 'file://localhost', 'http://localhost', 'https://localhost']
    local_prefixes = [
        "file:///",
        "file://localhost",
        "http://localhost",
        "https://localhost",
    ]
    return any(url.startswith(prefix) for prefix in local_prefixes)
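`check_local_file_access` is a plain prefix test that refuses `file://` URLs and localhost addresses before any request is made, so the agent cannot use browsing commands to read local files or probe local services. A usage sketch against the function shown above:

```
assert check_local_file_access("file:///etc/passwd") is True
assert check_local_file_access("http://localhost:8000/admin") is True
assert check_local_file_access("https://example.com/page") is False
```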
@@ -36,11 +43,11 @@ def get_response(url, timeout=10):
    try:
        # Restrict access to local files
        if check_local_file_access(url):
            raise ValueError('Access to local files is restricted')
            raise ValueError("Access to local files is restricted")

        # Most basic check if the URL is valid:
        if not url.startswith('http://') and not url.startswith('https://'):
            raise ValueError('Invalid URL format')
        if not url.startswith("http://") and not url.startswith("https://"):
            raise ValueError("Invalid URL format")

        sanitized_url = sanitize_url(url)

@@ -74,7 +81,7 @@ def scrape_text(url):
    text = soup.get_text()
    lines = (line.strip() for line in text.splitlines())
    chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
    text = '\n'.join(chunk for chunk in chunks if chunk)
    text = "\n".join(chunk for chunk in chunks if chunk)

    return text

@@ -82,8 +89,8 @@ def scrape_text(url):
def extract_hyperlinks(soup):
    """Extract hyperlinks from a BeautifulSoup object"""
    hyperlinks = []
    for link in soup.find_all('a', href=True):
        hyperlinks.append((link.text, link['href']))
    for link in soup.find_all("a", href=True):
        hyperlinks.append((link.text, link["href"]))
    return hyperlinks


@@ -134,7 +141,7 @@ def create_message(chunk, question):
    """Create a message for the user to summarize a chunk of text"""
    return {
        "role": "user",
        "content": f"\"\"\"{chunk}\"\"\" Using the above text, please answer the following question: \"{question}\" -- if the question cannot be answered using the text, please summarize the text."
        "content": f'"""{chunk}""" Using the above text, please answer the following question: "{question}" -- if the question cannot be answered using the text, please summarize the text.',
    }


@@ -152,8 +159,7 @@ def summarize_text(url, text, question):
    for i, chunk in enumerate(chunks):
        print(f"Adding chunk {i + 1} / {len(chunks)} to memory")

        memory_to_add = f"Source: {url}\n" \
            f"Raw content part#{i + 1}: {chunk}"
        memory_to_add = f"Source: {url}\n" f"Raw content part#{i + 1}: {chunk}"

        memory.add(memory_to_add)

@@ -168,8 +174,7 @@ def summarize_text(url, text, question):
        summaries.append(summary)
        print(f"Added chunk {i + 1} summary to memory")

        memory_to_add = f"Source: {url}\n" \
            f"Content summary part#{i + 1}: {summary}"
        memory_to_add = f"Source: {url}\n" f"Content summary part#{i + 1}: {summary}"

        memory.add(memory_to_add)
@@ -1,4 +1,5 @@
from autogpt.config import Config

cfg = Config()

from autogpt.llm_utils import create_chat_completion

@@ -22,8 +23,6 @@ def call_ai_function(function, args, description, model=None):
        {"role": "user", "content": args},
    ]

    response = create_chat_completion(
        model=model, messages=messages, temperature=0
    )
    response = create_chat_completion(model=model, messages=messages, temperature=0)

    return response
@@ -1,11 +1,13 @@
import logging
import time

import openai
from dotenv import load_dotenv
from autogpt.config import Config

from autogpt import token_counter
from autogpt.config import Config
from autogpt.llm_utils import create_chat_completion
from autogpt.logger import logger
import logging

cfg = Config()

@@ -26,28 +28,33 @@ def create_chat_message(role, content):

def generate_context(prompt, relevant_memory, full_message_history, model):
    current_context = [
        create_chat_message("system", prompt),
        create_chat_message(
            "system", prompt),
            "system", f"The current time and date is {time.strftime('%c')}"
        ),
        create_chat_message(
            "system", f"The current time and date is {time.strftime('%c')}"),
        create_chat_message(
            "system", f"This reminds you of these events from your past:\n{relevant_memory}\n\n")]
            "system",
            f"This reminds you of these events from your past:\n{relevant_memory}\n\n",
        ),
    ]

    # Add messages from the full message history until we reach the token limit
    next_message_to_add_index = len(full_message_history) - 1
    insertion_index = len(current_context)
    # Count the currently used tokens
    current_tokens_used = token_counter.count_message_tokens(current_context, model)
    return next_message_to_add_index, current_tokens_used, insertion_index, current_context
    return (
        next_message_to_add_index,
        current_tokens_used,
        insertion_index,
        current_context,
    )


# TODO: Change debug from hardcode to argument
def chat_with_ai(
    prompt,
    user_input,
    full_message_history,
    permanent_memory,
    token_limit):
    prompt, user_input, full_message_history, permanent_memory, token_limit
):
    """Interact with the OpenAI API, sending the prompt, user input, message history, and permanent memory."""
    while True:
        try:

@@ -70,31 +77,51 @@ def chat_with_ai(
            logger.debug(f"Token limit: {token_limit}")
            send_token_limit = token_limit - 1000

            relevant_memory = '' if len(full_message_history) ==0 else permanent_memory.get_relevant(str(full_message_history[-9:]), 10)
            relevant_memory = (
                ""
                if len(full_message_history) == 0
                else permanent_memory.get_relevant(str(full_message_history[-9:]), 10)
            )

            logger.debug(f'Memory Stats: {permanent_memory.get_stats()}')
            logger.debug(f"Memory Stats: {permanent_memory.get_stats()}")

            next_message_to_add_index, current_tokens_used, insertion_index, current_context = generate_context(
                prompt, relevant_memory, full_message_history, model)
            (
                next_message_to_add_index,
                current_tokens_used,
                insertion_index,
                current_context,
            ) = generate_context(prompt, relevant_memory, full_message_history, model)

            while current_tokens_used > 2500:
                # remove memories until we are under 2500 tokens
                relevant_memory = relevant_memory[1:]
                next_message_to_add_index, current_tokens_used, insertion_index, current_context = generate_context(
                    prompt, relevant_memory, full_message_history, model)
                (
                    next_message_to_add_index,
                    current_tokens_used,
                    insertion_index,
                    current_context,
                ) = generate_context(
                    prompt, relevant_memory, full_message_history, model
                )

            current_tokens_used += token_counter.count_message_tokens([create_chat_message("user", user_input)], model)  # Account for user input (appended later)
            current_tokens_used += token_counter.count_message_tokens(
                [create_chat_message("user", user_input)], model
            )  # Account for user input (appended later)

            while next_message_to_add_index >= 0:
                # print (f"CURRENT TOKENS USED: {current_tokens_used}")
                message_to_add = full_message_history[next_message_to_add_index]

                tokens_to_add = token_counter.count_message_tokens([message_to_add], model)
                tokens_to_add = token_counter.count_message_tokens(
                    [message_to_add], model
                )
                if current_tokens_used + tokens_to_add > send_token_limit:
                    break

                # Add the most recent message to the start of the current context, after the two system prompts.
                current_context.insert(insertion_index, full_message_history[next_message_to_add_index])
                current_context.insert(
                    insertion_index, full_message_history[next_message_to_add_index]
                )

                # Count the currently used tokens
                current_tokens_used += tokens_to_add

@@ -130,12 +157,10 @@ def chat_with_ai(
            )

            # Update full message history
            full_message_history.append(create_chat_message("user", user_input))
            full_message_history.append(
                create_chat_message(
                    "user", user_input))
            full_message_history.append(
                create_chat_message(
                    "assistant", assistant_reply))
                create_chat_message("assistant", assistant_reply)
            )

            return assistant_reply
        except openai.error.RateLimitError:
@@ -1,19 +1,11 @@
from autogpt import browse
import json
from autogpt.memory import get_memory
import datetime
import autogpt.agent_manager as agents
from autogpt import speak
from autogpt.config import Config
import autogpt.ai_functions as ai
from autogpt.file_operations import read_file, write_to_file, append_to_file, delete_file, search_files
from autogpt.execute_code import execute_python_file, execute_shell
from autogpt.json_parser import fix_and_parse_json
from autogpt.image_gen import generate_image
import json

from duckduckgo_search import ddg
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from autogpt.web import browse_website

cfg = Config()


@@ -57,7 +49,6 @@ def execute_command(command_name, arguments):

    try:
        if command_name == "google":

            # Check if the Google API key is set and use the official search method
            # If the API key is not set or has only whitespaces, use the unofficial search method
            key = cfg.google_api_key

@@ -69,9 +60,8 @@ def execute_command(command_name, arguments):
            return memory.add(arguments["string"])
        elif command_name == "start_agent":
            return start_agent(
                arguments["name"],
                arguments["task"],
                arguments["prompt"])
                arguments["name"], arguments["task"], arguments["prompt"]
            )
        elif command_name == "message_agent":
            return message_agent(arguments["key"], arguments["message"])
        elif command_name == "list_agents":

@@ -125,8 +115,9 @@ def execute_command(command_name, arguments):

def get_datetime():
    """Return the current date and time"""
    return "Current date and time: " + \
        datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    return "Current date and time: " + datetime.datetime.now().strftime(
        "%Y-%m-%d %H:%M:%S"
    )


def google_search(query, num_results=8):

@@ -140,9 +131,10 @@ def google_search(query, num_results=8):

def google_official_search(query, num_results=8):
    """Return the results of a google search using the official Google API"""
    import json

    from googleapiclient.discovery import build
    from googleapiclient.errors import HttpError
    import json

    try:
        # Get the Google API key and Custom Search Engine ID from the config file

@@ -153,7 +145,11 @@ def google_official_search(query, num_results=8):
        service = build("customsearch", "v1", developerKey=api_key)

        # Send the search query and retrieve the results
        result = service.cse().list(q=query, cx=custom_search_engine_id, num=num_results).execute()
        result = (
            service.cse()
            .list(q=query, cx=custom_search_engine_id, num=num_results)
            .execute()
        )

        # Extract the search result items from the response
        search_results = result.get("items", [])

@@ -166,7 +162,11 @@ def google_official_search(query, num_results=8):
        error_details = json.loads(e.content.decode())

        # Check if the error is related to an invalid or missing API key
        if error_details.get("error", {}).get("code") == 403 and "invalid API key" in error_details.get("error", {}).get("message", ""):
        if error_details.get("error", {}).get(
            "code"
        ) == 403 and "invalid API key" in error_details.get("error", {}).get(
            "message", ""
        ):
            return "Error: The provided Google API key is invalid or missing."
        else:
            return f"Error: {e}"
@@ -1,8 +1,10 @@
import abc
import os

import openai
import yaml
from dotenv import load_dotenv

# Load environment variables from .env file
load_dotenv()

@@ -17,9 +19,7 @@ class Singleton(abc.ABCMeta, type):
    def __call__(cls, *args, **kwargs):
        """Call method for the singleton metaclass."""
        if cls not in cls._instances:
            cls._instances[cls] = super(
                Singleton, cls).__call__(
                *args, **kwargs)
            cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
        return cls._instances[cls]
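The `Singleton` metaclass caches the first instance of each class in `_instances` and returns it for every later construction, which is why modules throughout this diff can call `Config()` at import time and still share one configuration object. A compact usage sketch:

```
cfg_a = Config()
cfg_b = Config()
assert cfg_a is cfg_b  # the metaclass returns the cached instance
```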
@@ -50,8 +50,10 @@ class Config(metaclass=Singleton):

        self.openai_api_key = os.getenv("OPENAI_API_KEY")
        self.temperature = float(os.getenv("TEMPERATURE", "1"))
        self.use_azure = os.getenv("USE_AZURE") == 'True'
        self.execute_local_commands = os.getenv('EXECUTE_LOCAL_COMMANDS', 'False') == 'True'
        self.use_azure = os.getenv("USE_AZURE") == "True"
        self.execute_local_commands = (
            os.getenv("EXECUTE_LOCAL_COMMANDS", "False") == "True"
        )

        if self.use_azure:
            self.load_azure_config()
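`os.getenv` returns strings, so these boolean settings are parsed by comparing against the exact literal `"True"`; any other spelling, including `"true"` or `"1"`, silently comes out as `False`. A small sketch of that behaviour:

```
import os

os.environ["EXECUTE_LOCAL_COMMANDS"] = "true"  # lowercase
flag = os.getenv("EXECUTE_LOCAL_COMMANDS", "False") == "True"
print(flag)  # False -- the comparison is exact and case-sensitive
```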
@ -80,15 +82,18 @@ class Config(metaclass=Singleton):
|
|||
|
||||
# User agent headers to use when browsing web
|
||||
# Some websites might just completely deny request with an error code if no user agent was found.
|
||||
self.user_agent = os.getenv("USER_AGENT", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36")
|
||||
self.user_agent = os.getenv(
|
||||
"USER_AGENT",
|
||||
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36",
|
||||
)
|
||||
self.redis_host = os.getenv("REDIS_HOST", "localhost")
|
||||
self.redis_port = os.getenv("REDIS_PORT", "6379")
|
||||
self.redis_password = os.getenv("REDIS_PASSWORD", "")
|
||||
self.wipe_redis_on_start = os.getenv("WIPE_REDIS_ON_START", "True") == 'True'
|
||||
self.memory_index = os.getenv("MEMORY_INDEX", 'auto-gpt')
|
||||
self.wipe_redis_on_start = os.getenv("WIPE_REDIS_ON_START", "True") == "True"
|
||||
self.memory_index = os.getenv("MEMORY_INDEX", "auto-gpt")
|
||||
# Note that indexes must be created on db 0 in redis, this is not configurable.
|
||||
|
||||
self.memory_backend = os.getenv("MEMORY_BACKEND", 'local')
|
||||
self.memory_backend = os.getenv("MEMORY_BACKEND", "local")
|
||||
# Initialize the OpenAI API client
|
||||
openai.api_key = self.openai_api_key
|
||||
|
||||
|
@ -105,13 +110,17 @@ class Config(metaclass=Singleton):
|
|||
if model == self.fast_llm_model:
|
||||
return self.azure_model_to_deployment_id_map["fast_llm_model_deployment_id"]
|
||||
elif model == self.smart_llm_model:
|
||||
return self.azure_model_to_deployment_id_map["smart_llm_model_deployment_id"]
|
||||
return self.azure_model_to_deployment_id_map[
|
||||
"smart_llm_model_deployment_id"
|
||||
]
|
||||
elif model == "text-embedding-ada-002":
|
||||
return self.azure_model_to_deployment_id_map["embedding_model_deployment_id"]
|
||||
return self.azure_model_to_deployment_id_map[
|
||||
"embedding_model_deployment_id"
|
||||
]
|
||||
else:
|
||||
return ""
|
||||
|
||||
AZURE_CONFIG_FILE = os.path.join(os.path.dirname(__file__), '..', 'azure.yaml')
|
||||
AZURE_CONFIG_FILE = os.path.join(os.path.dirname(__file__), "..", "azure.yaml")
|
||||
|
||||
def load_azure_config(self, config_file: str = AZURE_CONFIG_FILE) -> None:
|
||||
"""
|
||||
|
@ -128,9 +137,15 @@ class Config(metaclass=Singleton):
|
|||
config_params = yaml.load(file, Loader=yaml.FullLoader)
|
||||
except FileNotFoundError:
|
||||
config_params = {}
|
||||
self.openai_api_type = os.getenv("OPENAI_API_TYPE", config_params.get("azure_api_type", "azure"))
|
||||
self.openai_api_base = os.getenv("OPENAI_AZURE_API_BASE", config_params.get("azure_api_base", ""))
|
||||
self.openai_api_version = os.getenv("OPENAI_AZURE_API_VERSION", config_params.get("azure_api_version", ""))
|
||||
self.openai_api_type = os.getenv(
|
||||
"OPENAI_API_TYPE", config_params.get("azure_api_type", "azure")
|
||||
)
|
||||
self.openai_api_base = os.getenv(
|
||||
"OPENAI_AZURE_API_BASE", config_params.get("azure_api_base", "")
|
||||
)
|
||||
self.openai_api_version = os.getenv(
|
||||
"OPENAI_AZURE_API_VERSION", config_params.get("azure_api_version", "")
|
||||
)
|
||||
self.azure_model_to_deployment_id_map = config_params.get("azure_model_map", [])
|
||||
|
||||
def set_continuous_mode(self, value: bool):
|
||||
|
|
|
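Reviewer note: for anyone unfamiliar with the Singleton metaclass reformatted above, here is a minimal, self-contained sketch of how it behaves; the `AppConfig` class is illustrative only, not from the codebase:

```python
import abc


class Singleton(abc.ABCMeta, type):
    """Metaclass that returns the same instance on every instantiation."""

    _instances = {}

    def __call__(cls, *args, **kwargs):
        # First call constructs and caches the instance; later calls reuse it.
        if cls not in cls._instances:
            cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
        return cls._instances[cls]


class AppConfig(metaclass=Singleton):
    def __init__(self):
        self.debug_mode = False


assert AppConfig() is AppConfig()  # both calls yield the same object
```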
@@ -1,19 +1,22 @@
 import argparse
 import logging

 from autogpt.config import Config
-from autogpt.memory import get_memory
 from autogpt.file_operations import ingest_file, search_files
+from autogpt.memory import get_memory

 cfg = Config()


 def configure_logging():
-    logging.basicConfig(filename='log-ingestion.txt',
-                        filemode='a',
-                        format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
-                        datefmt='%H:%M:%S',
-                        level=logging.DEBUG)
-    return logging.getLogger('AutoGPT-Ingestion')
+    logging.basicConfig(
+        filename="log-ingestion.txt",
+        filemode="a",
+        format="%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s",
+        datefmt="%H:%M:%S",
+        level=logging.DEBUG,
+    )
+    return logging.getLogger("AutoGPT-Ingestion")


 def ingest_directory(directory, memory, args):

@@ -34,19 +37,38 @@ def ingest_directory(directory, memory, args):
 def main():
     logger = configure_logging()

-    parser = argparse.ArgumentParser(description="Ingest a file or a directory with multiple files into memory. Make sure to set your .env before running this script.")
+    parser = argparse.ArgumentParser(
+        description="Ingest a file or a directory with multiple files into memory. Make sure to set your .env before running this script."
+    )
     group = parser.add_mutually_exclusive_group(required=True)
     group.add_argument("--file", type=str, help="The file to ingest.")
-    group.add_argument("--dir", type=str, help="The directory containing the files to ingest.")
-    parser.add_argument("--init", action='store_true', help="Init the memory and wipe its content (default: False)", default=False)
-    parser.add_argument("--overlap", type=int, help="The overlap size between chunks when ingesting files (default: 200)", default=200)
-    parser.add_argument("--max_length", type=int, help="The max_length of each chunk when ingesting files (default: 4000)", default=4000)
+    group.add_argument(
+        "--dir", type=str, help="The directory containing the files to ingest."
+    )
+    parser.add_argument(
+        "--init",
+        action="store_true",
+        help="Init the memory and wipe its content (default: False)",
+        default=False,
+    )
+    parser.add_argument(
+        "--overlap",
+        type=int,
+        help="The overlap size between chunks when ingesting files (default: 200)",
+        default=200,
+    )
+    parser.add_argument(
+        "--max_length",
+        type=int,
+        help="The max_length of each chunk when ingesting files (default: 4000)",
+        default=4000,
+    )

     args = parser.parse_args()

     # Initialize memory
     memory = get_memory(cfg, init=args.init)
-    print('Using memory of type: ' + memory.__class__.__name__)
+    print("Using memory of type: " + memory.__class__.__name__)

     if args.file:
         try:

@@ -63,7 +85,9 @@ def main():
             logger.error(f"Error while ingesting directory '{args.dir}': {str(e)}")
             print(f"Error while ingesting directory '{args.dir}': {str(e)}")
     else:
-        print("Please provide either a file path (--file) or a directory name (--dir) inside the auto_gpt_workspace directory as input.")
+        print(
+            "Please provide either a file path (--file) or a directory name (--dir) inside the auto_gpt_workspace directory as input."
+        )


 if __name__ == "__main__":
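Reviewer note: the mutually-exclusive group used above means exactly one of `--file` or `--dir` must be supplied. A minimal runnable sketch of the same pattern (argument values here are illustrative):

```python
import argparse

parser = argparse.ArgumentParser(description="Ingest a file or a directory into memory.")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("--file", type=str, help="The file to ingest.")
group.add_argument("--dir", type=str, help="The directory containing the files to ingest.")

# Passing exactly one of the two options is required; passing both is an error.
args = parser.parse_args(["--file", "notes.txt"])
print(args.file)  # -> notes.txt
```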
@@ -1,7 +1,7 @@
-import docker
 import os
 import subprocess

+import docker

 WORKSPACE_FOLDER = "auto_gpt_workspace"

@@ -20,7 +20,9 @@ def execute_python_file(file):
         return f"Error: File '{file}' does not exist."

     if we_are_running_in_a_docker_container():
-        result = subprocess.run(f'python {file_path}', capture_output=True, encoding="utf8", shell=True)
+        result = subprocess.run(
+            f"python {file_path}", capture_output=True, encoding="utf8", shell=True
+        )
         if result.returncode == 0:
             return result.stdout
         else:

@@ -29,18 +31,20 @@ def execute_python_file(file):
     try:
         client = docker.from_env()

-        image_name = 'python:3.10'
+        image_name = "python:3.10"
         try:
             client.images.get(image_name)
             print(f"Image '{image_name}' found locally")
         except docker.errors.ImageNotFound:
-            print(f"Image '{image_name}' not found locally, pulling from Docker Hub")
+            print(
+                f"Image '{image_name}' not found locally, pulling from Docker Hub"
+            )
             # Use the low-level API to stream the pull response
             low_level_client = docker.APIClient()
             for line in low_level_client.pull(image_name, stream=True, decode=True):
                 # Print the status and progress, if available
-                status = line.get('status')
-                progress = line.get('progress')
+                status = line.get("status")
+                progress = line.get("progress")
                 if status and progress:
                     print(f"{status}: {progress}")
                 elif status:

@@ -51,19 +55,21 @@ def execute_python_file(file):
         # https://hub.docker.com/_/python
         container = client.containers.run(
             image_name,
-            f'python {file}',
+            f"python {file}",
             volumes={
                 os.path.abspath(WORKSPACE_FOLDER): {
-                    'bind': '/workspace',
-                    'mode': 'ro'}},
-            working_dir='/workspace',
+                    "bind": "/workspace",
+                    "mode": "ro",
+                }
+            },
+            working_dir="/workspace",
             stderr=True,
             stdout=True,
             detach=True,
         )

         output = container.wait()
-        logs = container.logs().decode('utf-8')
+        logs = container.logs().decode("utf-8")
         container.remove()

         # print(f"Execution complete. Output: {output}")

@@ -76,7 +82,6 @@ def execute_python_file(file):


 def execute_shell(command_line):
-
     current_dir = os.getcwd()

     if not WORKSPACE_FOLDER in current_dir:  # Change dir into workspace if necessary

@@ -96,4 +101,4 @@ def execute_shell(command_line):


 def we_are_running_in_a_docker_container():
-    os.path.exists('/.dockerenv')
+    os.path.exists("/.dockerenv")
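Reviewer note: as written on both sides of this diff, `we_are_running_in_a_docker_container()` evaluates `os.path.exists(...)` without returning it, so every call yields `None` (falsy) and the subprocess branch is never taken. A sketch of the presumably intended predicate:

```python
import os


def we_are_running_in_a_docker_container() -> bool:
    # Docker creates /.dockerenv inside containers, so its presence is a
    # reasonable (though not bulletproof) signal that we are containerized.
    return os.path.exists("/.dockerenv")
```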
@@ -47,7 +47,7 @@ def read_file(filename):
     """Read a file and return the contents"""
     try:
         filepath = safe_join(working_directory, filename)
-        with open(filepath, "r", encoding='utf-8') as f:
+        with open(filepath, "r", encoding="utf-8") as f:
             content = f.read()
         return content
     except Exception as e:

@@ -75,8 +75,9 @@ def ingest_file(filename, memory, max_length=4000, overlap=200):
     num_chunks = len(chunks)
     for i, chunk in enumerate(chunks):
         print(f"Ingesting chunk {i + 1} / {num_chunks} into memory")
-        memory_to_add = f"Filename: {filename}\n" \
-                        f"Content part#{i + 1}/{num_chunks}: {chunk}"
+        memory_to_add = (
+            f"Filename: {filename}\n" f"Content part#{i + 1}/{num_chunks}: {chunk}"
+        )

         memory.add(memory_to_add)

@@ -92,7 +93,7 @@ def write_to_file(filename, text):
         directory = os.path.dirname(filepath)
         if not os.path.exists(directory):
             os.makedirs(directory)
-        with open(filepath, "w", encoding='utf-8') as f:
+        with open(filepath, "w", encoding="utf-8") as f:
             f.write(text)
         return "File written to successfully."
     except Exception as e:

@@ -130,7 +131,7 @@ def search_files(directory):

     for root, _, files in os.walk(search_directory):
         for file in files:
-            if file.startswith('.'):
+            if file.startswith("."):
                 continue
             relative_path = os.path.relpath(os.path.join(root, file), working_directory)
             found_files.append(relative_path)
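Reviewer note: `ingest_file` above consumes chunks produced with a `max_length`/`overlap` scheme; the splitter itself is outside this diff, so the following is only a hedged sketch of one common way to produce overlapping chunks (function name `split_file` is an assumption):

```python
def split_file(content: str, max_length: int = 4000, overlap: int = 200):
    """Yield successive chunks of `content`, overlapping by `overlap` characters."""
    start = 0
    while start < len(content):
        end = min(start + max_length, len(content))
        yield content[start:end]
        if end == len(content):
            break
        start = end - overlap  # step back so neighboring chunks share context


chunks = list(split_file("abcdefghij", max_length=4, overlap=1))
# -> ['abcd', 'defg', 'ghij']
```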
@@ -1,24 +1,24 @@
-import requests
 import io
 import os.path
-from PIL import Image
-from autogpt.config import Config
 import uuid
-import openai
 from base64 import b64decode

+import openai
+import requests
+from PIL import Image
+
+from autogpt.config import Config
+
 cfg = Config()

 working_directory = "auto_gpt_workspace"


 def generate_image(prompt):

     filename = str(uuid.uuid4()) + ".jpg"

     # DALL-E
-    if cfg.image_provider == 'dalle':
-
+    if cfg.image_provider == "dalle":
         openai.api_key = cfg.openai_api_key

         response = openai.Image.create(

@@ -38,14 +38,19 @@ def generate_image(prompt):
         return "Saved to disk:" + filename

     # STABLE DIFFUSION
-    elif cfg.image_provider == 'sd':
-
-        API_URL = "https://api-inference.huggingface.co/models/CompVis/stable-diffusion-v1-4"
+    elif cfg.image_provider == "sd":
+        API_URL = (
+            "https://api-inference.huggingface.co/models/CompVis/stable-diffusion-v1-4"
+        )
         headers = {"Authorization": "Bearer " + cfg.huggingface_api_token}

-        response = requests.post(API_URL, headers=headers, json={
+        response = requests.post(
+            API_URL,
+            headers=headers,
+            json={
                 "inputs": prompt,
-        })
+            },
+        )

         image = Image.open(io.BytesIO(response.content))
         print("Image Generated for prompt:" + prompt)
@@ -1,5 +1,6 @@
 import json
 from typing import Any, Dict, Union
+
 from autogpt.call_ai_function import call_ai_function
 from autogpt.config import Config
 from autogpt.json_utils import correct_json

@@ -28,12 +29,11 @@ JSON_SCHEMA = """


 def fix_and_parse_json(
-    json_str: str,
-    try_to_fix_with_gpt: bool = True
+    json_str: str, try_to_fix_with_gpt: bool = True
 ) -> Union[str, Dict[Any, Any]]:
     """Fix and parse JSON string"""
     try:
-        json_str = json_str.replace('\t', '')
+        json_str = json_str.replace("\t", "")
         return json.loads(json_str)
     except json.JSONDecodeError as _:  # noqa: F841
         try:

@@ -57,10 +57,12 @@ def fix_and_parse_json(
     # Can throw a ValueError if there is no "{" or "}" in the json_str
     except (json.JSONDecodeError, ValueError) as e:  # noqa: F841
         if try_to_fix_with_gpt:
-            logger.warn("Warning: Failed to parse AI output, attempting to fix."
+            logger.warn(
+                "Warning: Failed to parse AI output, attempting to fix."
                 "\n If you see this warning frequently, it's likely that"
                 " your prompt is confusing the AI. Try changing it up"
-                " slightly.")
+                " slightly."
+            )
             # Now try to fix this up using the ai_functions
             ai_fixed_json = fix_json(json_str, JSON_SCHEMA)

@@ -80,11 +82,13 @@ def fix_json(json_str: str, schema: str) -> str:
     # Try to fix the JSON using GPT:
     function_string = "def fix_json(json_str: str, schema:str=None) -> str:"
     args = [f"'''{json_str}'''", f"'''{schema}'''"]
-    description_string = "Fixes the provided JSON string to make it parseable"\
-        " and fully compliant with the provided schema.\n If an object or"\
-        " field specified in the schema isn't contained within the correct"\
-        " JSON, it is omitted.\n This function is brilliant at guessing"\
+    description_string = (
+        "Fixes the provided JSON string to make it parseable"
+        " and fully compliant with the provided schema.\n If an object or"
+        " field specified in the schema isn't contained within the correct"
+        " JSON, it is omitted.\n This function is brilliant at guessing"
         " when the format is incorrect."
+    )

     # If it doesn't already start with a "`", add one:
     if not json_str.startswith("`"):
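Reviewer note: the `description_string` change relies on Python's implicit concatenation of adjacent string literals inside parentheses, which black prefers over backslash line continuations. A tiny runnable illustration:

```python
message = (
    "Fixes the provided JSON string"
    " and fully complies with the schema."
)
assert message == "Fixes the provided JSON string and fully complies with the schema."
```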
@@ -1,5 +1,6 @@
-import re
 import json
+import re

 from autogpt.config import Config

 cfg = Config()

@@ -17,7 +18,7 @@ def extract_char_position(error_message: str) -> int:
     """
     import re

-    char_pattern = re.compile(r'\(char (\d+)\)')
+    char_pattern = re.compile(r"\(char (\d+)\)")
     if match := char_pattern.search(error_message):
         return int(match[1])
     else:

@@ -38,10 +39,8 @@ def add_quotes_to_property_names(json_string: str) -> str:
     def replace_func(match):
         return f'"{match.group(1)}":'

-    property_name_pattern = re.compile(r'(\w+):')
-    corrected_json_string = property_name_pattern.sub(
-        replace_func,
-        json_string)
+    property_name_pattern = re.compile(r"(\w+):")
+    corrected_json_string = property_name_pattern.sub(replace_func, json_string)

     try:
         json.loads(corrected_json_string)

@@ -61,15 +60,15 @@ def balance_braces(json_string: str) -> str:
         str: The JSON string with braces balanced.
     """

-    open_braces_count = json_string.count('{')
-    close_braces_count = json_string.count('}')
+    open_braces_count = json_string.count("{")
+    close_braces_count = json_string.count("}")

     while open_braces_count > close_braces_count:
-        json_string += '}'
+        json_string += "}"
         close_braces_count += 1

     while close_braces_count > open_braces_count:
-        json_string = json_string.rstrip('}')
+        json_string = json_string.rstrip("}")
         close_braces_count -= 1

     try:

@@ -80,16 +79,15 @@ def balance_braces(json_string: str) -> str:


 def fix_invalid_escape(json_str: str, error_message: str) -> str:
-    while error_message.startswith('Invalid \\escape'):
+    while error_message.startswith("Invalid \\escape"):
         bad_escape_location = extract_char_position(error_message)
-        json_str = json_str[:bad_escape_location] + \
-            json_str[bad_escape_location + 1:]
+        json_str = json_str[:bad_escape_location] + json_str[bad_escape_location + 1 :]
         try:
             json.loads(json_str)
             return json_str
         except json.JSONDecodeError as e:
             if cfg.debug_mode:
-                print('json loads error - fix invalid escape', e)
+                print("json loads error - fix invalid escape", e)
             error_message = str(e)
     return json_str

@@ -109,18 +107,20 @@ def correct_json(json_str: str) -> str:
         return json_str
     except json.JSONDecodeError as e:
         if cfg.debug_mode:
-            print('json loads error', e)
+            print("json loads error", e)
         error_message = str(e)
-        if error_message.startswith('Invalid \\escape'):
+        if error_message.startswith("Invalid \\escape"):
             json_str = fix_invalid_escape(json_str, error_message)
-        if error_message.startswith('Expecting property name enclosed in double quotes'):
+        if error_message.startswith(
+            "Expecting property name enclosed in double quotes"
+        ):
             json_str = add_quotes_to_property_names(json_str)
             try:
                 json.loads(json_str)
                 return json_str
             except json.JSONDecodeError as e:
                 if cfg.debug_mode:
-                    print('json loads error - add quotes', e)
+                    print("json loads error - add quotes", e)
                 error_message = str(e)
         if balanced_str := balance_braces(json_str):
             return balanced_str
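Reviewer note: a quick illustration of what `balance_braces` accomplishes on a truncated model response, with its core logic inlined for brevity:

```python
import json

truncated = '{"command": {"name": "google"'
# Append one "}" for each unmatched "{" — the same repair balance_braces performs.
repaired = truncated + "}" * (truncated.count("{") - truncated.count("}"))
print(json.loads(repaired))  # -> {'command': {'name': 'google'}}
```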
@@ -1,6 +1,8 @@
 import time

 import openai
 from colorama import Fore

 from autogpt.config import Config

 cfg = Config()

@@ -10,7 +12,9 @@ openai.api_key = cfg.openai_api_key

 # Overly simple abstraction until we create something better
 # simple retry mechanism when getting a rate error or a bad gateway
-def create_chat_completion(messages, model=None, temperature=cfg.temperature, max_tokens=None)->str:
+def create_chat_completion(
+    messages, model=None, temperature=cfg.temperature, max_tokens=None
+) -> str:
     """Create a chat completion using the OpenAI API"""
     response = None
     num_retries = 5

@@ -22,24 +26,30 @@ def create_chat_completion(messages, model=None, temperature=cfg.temperature, ma
                     model=model,
                     messages=messages,
                     temperature=temperature,
-                    max_tokens=max_tokens
+                    max_tokens=max_tokens,
                 )
             else:
                 response = openai.ChatCompletion.create(
                     model=model,
                     messages=messages,
                     temperature=temperature,
-                    max_tokens=max_tokens
+                    max_tokens=max_tokens,
                 )
             break
         except openai.error.RateLimitError:
             if cfg.debug_mode:
-                print(Fore.RED + "Error: ", "API Rate Limit Reached. Waiting 20 seconds..." + Fore.RESET)
+                print(
+                    Fore.RED + "Error: ",
+                    "API Rate Limit Reached. Waiting 20 seconds..." + Fore.RESET,
+                )
             time.sleep(20)
         except openai.error.APIError as e:
             if e.http_status == 502:
                 if cfg.debug_mode:
-                    print(Fore.RED + "Error: ", "API Bad gateway. Waiting 20 seconds..." + Fore.RESET)
+                    print(
+                        Fore.RED + "Error: ",
+                        "API Bad gateway. Waiting 20 seconds..." + Fore.RESET,
+                    )
                 time.sleep(20)
             else:
                 raise
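Reviewer note: the retry behavior above, distilled into a standalone sketch; `RateLimitError`, `flaky_call`, and `call_with_retries` are placeholders for illustration, not part of the codebase:

```python
import time


class RateLimitError(Exception):  # stand-in for openai.error.RateLimitError
    pass


def call_with_retries(flaky_call, num_retries=5, wait_seconds=20):
    for attempt in range(num_retries):
        try:
            return flaky_call()  # success: return the API response
        except RateLimitError:
            print(f"Rate limited on attempt {attempt + 1}; waiting {wait_seconds}s...")
            time.sleep(wait_seconds)
    raise RuntimeError("Failed after retries")
```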
@@ -4,35 +4,33 @@ import random
 import re
 import time
 from logging import LogRecord
-from colorama import Fore

-from colorama import Style
+from colorama import Fore, Style

 from autogpt import speak
-from autogpt.config import Config
-from autogpt.config import Singleton
+from autogpt.config import Config, Singleton

 cfg = Config()

-'''
+"""
 Logger that handle titles in different colors.
 Outputs logs in console, activity.log, and errors.log
 For console handler: simulates typing
-'''
+"""


 class Logger(metaclass=Singleton):
     def __init__(self):
         # create log directory if it doesn't exist
         this_files_dir_path = os.path.dirname(__file__)
-        log_dir = os.path.join(this_files_dir_path, '../logs')
+        log_dir = os.path.join(this_files_dir_path, "../logs")
         if not os.path.exists(log_dir):
             os.makedirs(log_dir)

         log_file = "activity.log"
         error_file = "error.log"

-        console_formatter = AutoGptFormatter('%(title_color)s %(message)s')
+        console_formatter = AutoGptFormatter("%(title_color)s %(message)s")

         # Create a handler for console which simulate typing
         self.typing_console_handler = TypingConsoleHandler()

@@ -47,35 +45,34 @@ class Logger(metaclass=Singleton):
         # Info handler in activity.log
         self.file_handler = logging.FileHandler(os.path.join(log_dir, log_file))
         self.file_handler.setLevel(logging.DEBUG)
-        info_formatter = AutoGptFormatter('%(asctime)s %(levelname)s %(title)s %(message_no_color)s')
+        info_formatter = AutoGptFormatter(
+            "%(asctime)s %(levelname)s %(title)s %(message_no_color)s"
+        )
         self.file_handler.setFormatter(info_formatter)

         # Error handler error.log
         error_handler = logging.FileHandler(os.path.join(log_dir, error_file))
         error_handler.setLevel(logging.ERROR)
         error_formatter = AutoGptFormatter(
-            '%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s %(message_no_color)s')
+            "%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s %(message_no_color)s"
+        )
         error_handler.setFormatter(error_formatter)

-        self.typing_logger = logging.getLogger('TYPER')
+        self.typing_logger = logging.getLogger("TYPER")
         self.typing_logger.addHandler(self.typing_console_handler)
         self.typing_logger.addHandler(self.file_handler)
         self.typing_logger.addHandler(error_handler)
         self.typing_logger.setLevel(logging.DEBUG)

-        self.logger = logging.getLogger('LOGGER')
+        self.logger = logging.getLogger("LOGGER")
         self.logger.addHandler(self.console_handler)
         self.logger.addHandler(self.file_handler)
         self.logger.addHandler(error_handler)
         self.logger.setLevel(logging.DEBUG)

     def typewriter_log(
-            self,
-            title='',
-            title_color='',
-            content='',
-            speak_text=False,
-            level=logging.INFO):
+        self, title="", title_color="", content="", speak_text=False, level=logging.INFO
+    ):
         if speak_text and cfg.speak_mode:
             speak.say_text(f"{title}. {content}")

@@ -85,41 +82,34 @@ class Logger(metaclass=Singleton):
         else:
             content = ""

-        self.typing_logger.log(level, content, extra={'title': title, 'color': title_color})
+        self.typing_logger.log(
+            level, content, extra={"title": title, "color": title_color}
+        )

     def debug(
         self,
         message,
-        title='',
-        title_color='',
+        title="",
+        title_color="",
     ):
         self._log(title, title_color, message, logging.DEBUG)

     def warn(
         self,
         message,
-        title='',
-        title_color='',
+        title="",
+        title_color="",
     ):
         self._log(title, title_color, message, logging.WARN)

-    def error(
-            self,
-            title,
-            message=''
-    ):
+    def error(self, title, message=""):
         self._log(title, Fore.RED, message, logging.ERROR)

-    def _log(
-            self,
-            title='',
-            title_color='',
-            message='',
-            level=logging.INFO):
+    def _log(self, title="", title_color="", message="", level=logging.INFO):
         if message:
             if isinstance(message, list):
                 message = " ".join(message)
-        self.logger.log(level, message, extra={'title': title, 'color': title_color})
+        self.logger.log(level, message, extra={"title": title, "color": title_color})

     def set_level(self, level):
         self.logger.setLevel(level)

@@ -132,9 +122,9 @@ class Logger(metaclass=Singleton):
         self.typewriter_log("DOUBLE CHECK CONFIGURATION", Fore.YELLOW, additionalText)


-'''
+"""
 Output stream to console using simulated typing
-'''
+"""


 class TypingConsoleHandler(logging.StreamHandler):

@@ -173,21 +163,27 @@ class AutoGptFormatter(logging.Formatter):
     Allows to handle custom placeholders 'title_color' and 'message_no_color'.
     To use this formatter, make sure to pass 'color', 'title' as log extras.
     """

     def format(self, record: LogRecord) -> str:
-        if (hasattr(record, 'color')):
-            record.title_color = getattr(record, 'color') + getattr(record, 'title') + " " + Style.RESET_ALL
+        if hasattr(record, "color"):
+            record.title_color = (
+                getattr(record, "color")
+                + getattr(record, "title")
+                + " "
+                + Style.RESET_ALL
+            )
         else:
-            record.title_color = getattr(record, 'title')
-        if hasattr(record, 'msg'):
-            record.message_no_color = remove_color_codes(getattr(record, 'msg'))
+            record.title_color = getattr(record, "title")
+        if hasattr(record, "msg"):
+            record.message_no_color = remove_color_codes(getattr(record, "msg"))
         else:
-            record.message_no_color = ''
+            record.message_no_color = ""
         return super().format(record)


 def remove_color_codes(s: str) -> str:
-    ansi_escape = re.compile(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])')
-    return ansi_escape.sub('', s)
+    ansi_escape = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])")
+    return ansi_escape.sub("", s)


 logger = Logger()
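Reviewer note: the logger passes `title` and `color` through logging's `extra` mechanism; fields passed that way become plain attributes on the `LogRecord`. A minimal runnable sketch of the same pattern (class and logger names here are illustrative):

```python
import logging


class TitleFormatter(logging.Formatter):
    def format(self, record):
        # Fields passed via `extra=` become attributes on the record;
        # default to "" so the format string never fails.
        record.title = getattr(record, "title", "")
        return super().format(record)


handler = logging.StreamHandler()
handler.setFormatter(TitleFormatter("%(title)s %(message)s"))
log = logging.getLogger("demo")
log.addHandler(handler)
log.warning("something happened", extra={"title": "WARN:"})
```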
@@ -1,19 +1,22 @@
 from autogpt.memory.local import LocalCache
 from autogpt.memory.no_memory import NoMemory

 # List of supported memory backends
 # Add a backend to this list if the import attempt is successful
-supported_memory = ['local', 'no_memory']
+supported_memory = ["local", "no_memory"]

 try:
     from autogpt.memory.redismem import RedisMemory
-    supported_memory.append('redis')
+
+    supported_memory.append("redis")
 except ImportError:
     print("Redis not installed. Skipping import.")
     RedisMemory = None

 try:
     from autogpt.memory.pinecone import PineconeMemory
-    supported_memory.append('pinecone')
+
+    supported_memory.append("pinecone")
 except ImportError:
     print("Pinecone not installed. Skipping import.")
     PineconeMemory = None

@@ -23,16 +26,20 @@ def get_memory(cfg, init=False):
     memory = None
     if cfg.memory_backend == "pinecone":
         if not PineconeMemory:
-            print("Error: Pinecone is not installed. Please install pinecone"
-                  " to use Pinecone as a memory backend.")
+            print(
+                "Error: Pinecone is not installed. Please install pinecone"
+                " to use Pinecone as a memory backend."
+            )
         else:
             memory = PineconeMemory(cfg)
             if init:
                 memory.clear()
     elif cfg.memory_backend == "redis":
         if not RedisMemory:
-            print("Error: Redis is not installed. Please install redis-py to"
-                  " use Redis as a memory backend.")
+            print(
+                "Error: Redis is not installed. Please install redis-py to"
+                " use Redis as a memory backend."
+            )
         else:
             memory = RedisMemory(cfg)
     elif cfg.memory_backend == "no_memory":

@@ -49,10 +56,4 @@ def get_supported_memory_backends():
     return supported_memory


-__all__ = [
-    "get_memory",
-    "LocalCache",
-    "RedisMemory",
-    "PineconeMemory",
-    "NoMemory"
-]
+__all__ = ["get_memory", "LocalCache", "RedisMemory", "PineconeMemory", "NoMemory"]
@@ -1,17 +1,24 @@
 """Base class for memory providers."""
 import abc
-from autogpt.config import AbstractSingleton, Config
+
 import openai

+from autogpt.config import AbstractSingleton, Config
+
 cfg = Config()


 def get_ada_embedding(text):
     text = text.replace("\n", " ")
     if cfg.use_azure:
-        return openai.Embedding.create(input=[text], engine=cfg.get_azure_deployment_id_for_model("text-embedding-ada-002"))["data"][0]["embedding"]
+        return openai.Embedding.create(
+            input=[text],
+            engine=cfg.get_azure_deployment_id_for_model("text-embedding-ada-002"),
+        )["data"][0]["embedding"]
     else:
-        return openai.Embedding.create(input=[text], model="text-embedding-ada-002")["data"][0]["embedding"]
+        return openai.Embedding.create(input=[text], model="text-embedding-ada-002")[
+            "data"
+        ][0]["embedding"]


 class MemoryProviderSingleton(AbstractSingleton):
@@ -1,10 +1,11 @@
 import dataclasses
-import orjson
-from typing import Any, List, Optional
-import numpy as np
 import os
-from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding
+from typing import Any, List, Optional

+import numpy as np
+import orjson
+
+from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding

 EMBED_DIM = 1536
 SAVE_OPTIONS = orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_SERIALIZE_DATACLASS

@@ -23,16 +24,15 @@ class CacheContent:


 class LocalCache(MemoryProviderSingleton):
-
     # on load, load our database
     def __init__(self, cfg) -> None:
         self.filename = f"{cfg.memory_index}.json"
         if os.path.exists(self.filename):
             try:
-                with open(self.filename, 'w+b') as f:
+                with open(self.filename, "w+b") as f:
                     file_content = f.read()
                     if not file_content.strip():
-                        file_content = b'{}'
+                        file_content = b"{}"
                         f.write(file_content)

                     loaded = orjson.loads(file_content)

@@ -41,7 +41,9 @@ class LocalCache(MemoryProviderSingleton):
                 print(f"Error: The file '{self.filename}' is not in JSON format.")
                 self.data = CacheContent()
         else:
-            print(f"Warning: The file '{self.filename}' does not exist. Local memory would not be saved to a file.")
+            print(
+                f"Warning: The file '{self.filename}' does not exist. Local memory would not be saved to a file."
+            )
             self.data = CacheContent()

     def add(self, text: str):

@@ -54,7 +56,7 @@ class LocalCache(MemoryProviderSingleton):

         Returns: None
         """
-        if 'Command Error:' in text:
+        if "Command Error:" in text:
             return ""
         self.data.texts.append(text)

@@ -70,11 +72,8 @@ class LocalCache(MemoryProviderSingleton):
             axis=0,
         )

-        with open(self.filename, 'wb') as f:
-            out = orjson.dumps(
-                self.data,
-                option=SAVE_OPTIONS
-            )
+        with open(self.filename, "wb") as f:
+            out = orjson.dumps(self.data, option=SAVE_OPTIONS)
             f.write(out)
         return text
@@ -1,11 +1,10 @@
-from autogpt.config import Config, Singleton
-
 import pinecone
-
-from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding
-from autogpt.logger import logger
 from colorama import Fore, Style

+from autogpt.config import Config, Singleton
+from autogpt.logger import logger
+from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding
+

 class PineconeMemory(MemoryProviderSingleton):
     def __init__(self, cfg):

@@ -24,13 +23,21 @@ class PineconeMemory(MemoryProviderSingleton):
         try:
             pinecone.whoami()
         except Exception as e:
-            logger.typewriter_log("FAILED TO CONNECT TO PINECONE", Fore.RED, Style.BRIGHT + str(e) + Style.RESET_ALL)
-            logger.double_check("Please ensure you have setup and configured Pinecone properly for use. " +
-                                f"You can check out {Fore.CYAN + Style.BRIGHT}https://github.com/Torantulino/Auto-GPT#-pinecone-api-key-setup{Style.RESET_ALL} to ensure you've set up everything correctly.")
+            logger.typewriter_log(
+                "FAILED TO CONNECT TO PINECONE",
+                Fore.RED,
+                Style.BRIGHT + str(e) + Style.RESET_ALL,
+            )
+            logger.double_check(
+                "Please ensure you have setup and configured Pinecone properly for use. "
+                + f"You can check out {Fore.CYAN + Style.BRIGHT}https://github.com/Torantulino/Auto-GPT#-pinecone-api-key-setup{Style.RESET_ALL} to ensure you've set up everything correctly."
+            )
             exit(1)

         if table_name not in pinecone.list_indexes():
-            pinecone.create_index(table_name, dimension=dimension, metric=metric, pod_type=pod_type)
+            pinecone.create_index(
+                table_name, dimension=dimension, metric=metric, pod_type=pod_type
+            )
         self.index = pinecone.Index(table_name)

     def add(self, data):

@@ -55,9 +62,11 @@ class PineconeMemory(MemoryProviderSingleton):
         :param num_relevant: The number of relevant data to return. Defaults to 5
         """
         query_embedding = get_ada_embedding(data)
-        results = self.index.query(query_embedding, top_k=num_relevant, include_metadata=True)
+        results = self.index.query(
+            query_embedding, top_k=num_relevant, include_metadata=True
+        )
         sorted_results = sorted(results.matches, key=lambda x: x.score)
-        return [str(item['metadata']["raw_text"]) for item in sorted_results]
+        return [str(item["metadata"]["raw_text"]) for item in sorted_results]

     def get_stats(self):
         return self.index.describe_index_stats()
@@ -1,26 +1,22 @@
 """Redis memory provider."""
 from typing import Any, List, Optional
-import redis
-from redis.commands.search.field import VectorField, TextField
-from redis.commands.search.query import Query
-from redis.commands.search.indexDefinition import IndexDefinition, IndexType

 import numpy as np
-
-from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding
-from autogpt.logger import logger
+import redis
 from colorama import Fore, Style
+from redis.commands.search.field import TextField, VectorField
+from redis.commands.search.indexDefinition import IndexDefinition, IndexType
+from redis.commands.search.query import Query
+
+from autogpt.logger import logger
+from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding

 SCHEMA = [
     TextField("data"),
     VectorField(
         "embedding",
         "HNSW",
-        {
-            "TYPE": "FLOAT32",
-            "DIM": 1536,
-            "DISTANCE_METRIC": "COSINE"
-        }
+        {"TYPE": "FLOAT32", "DIM": 1536, "DISTANCE_METRIC": "COSINE"},
     ),
 ]

@@ -43,7 +39,7 @@ class RedisMemory(MemoryProviderSingleton):
             host=redis_host,
             port=redis_port,
             password=redis_password,
-            db=0  # Cannot be changed
+            db=0,  # Cannot be changed
         )
         self.cfg = cfg

@@ -51,9 +47,15 @@ class RedisMemory(MemoryProviderSingleton):
         try:
             self.redis.ping()
         except redis.ConnectionError as e:
-            logger.typewriter_log("FAILED TO CONNECT TO REDIS", Fore.RED, Style.BRIGHT + str(e) + Style.RESET_ALL)
-            logger.double_check("Please ensure you have setup and configured Redis properly for use. " +
-                                f"You can check out {Fore.CYAN + Style.BRIGHT}https://github.com/Torantulino/Auto-GPT#redis-setup{Style.RESET_ALL} to ensure you've set up everything correctly.")
+            logger.typewriter_log(
+                "FAILED TO CONNECT TO REDIS",
+                Fore.RED,
+                Style.BRIGHT + str(e) + Style.RESET_ALL,
+            )
+            logger.double_check(
+                "Please ensure you have setup and configured Redis properly for use. "
+                + f"You can check out {Fore.CYAN + Style.BRIGHT}https://github.com/Torantulino/Auto-GPT#redis-setup{Style.RESET_ALL} to ensure you've set up everything correctly."
+            )
             exit(1)

         if cfg.wipe_redis_on_start:

@@ -62,15 +64,13 @@ class RedisMemory(MemoryProviderSingleton):
             self.redis.ft(f"{cfg.memory_index}").create_index(
                 fields=SCHEMA,
                 definition=IndexDefinition(
-                    prefix=[f"{cfg.memory_index}:"],
-                    index_type=IndexType.HASH
-                )
+                    prefix=[f"{cfg.memory_index}:"], index_type=IndexType.HASH
+                ),
             )
         except Exception as e:
             print("Error creating Redis search index: ", e)
-        existing_vec_num = self.redis.get(f'{cfg.memory_index}-vec_num')
-        self.vec_num = int(existing_vec_num.decode('utf-8')) if\
-            existing_vec_num else 0
+        existing_vec_num = self.redis.get(f"{cfg.memory_index}-vec_num")
+        self.vec_num = int(existing_vec_num.decode("utf-8")) if existing_vec_num else 0

     def add(self, data: str) -> str:
         """

@@ -81,20 +81,18 @@ class RedisMemory(MemoryProviderSingleton):

         Returns: Message indicating that the data has been added.
         """
-        if 'Command Error:' in data:
+        if "Command Error:" in data:
             return ""
         vector = get_ada_embedding(data)
         vector = np.array(vector).astype(np.float32).tobytes()
-        data_dict = {
-            b"data": data,
-            "embedding": vector
-        }
+        data_dict = {b"data": data, "embedding": vector}
         pipe = self.redis.pipeline()
         pipe.hset(f"{self.cfg.memory_index}:{self.vec_num}", mapping=data_dict)
-        _text = f"Inserting data into memory at index: {self.vec_num}:\n"\
-            f"data: {data}"
+        _text = (
+            f"Inserting data into memory at index: {self.vec_num}:\n" f"data: {data}"
+        )
         self.vec_num += 1
-        pipe.set(f'{self.cfg.memory_index}-vec_num', self.vec_num)
+        pipe.set(f"{self.cfg.memory_index}-vec_num", self.vec_num)
         pipe.execute()
         return _text

@@ -118,11 +116,7 @@ class RedisMemory(MemoryProviderSingleton):
         self.redis.flushall()
         return "Obliviated"

-    def get_relevant(
-        self,
-        data: str,
-        num_relevant: int = 5
-    ) -> Optional[List[Any]]:
+    def get_relevant(self, data: str, num_relevant: int = 5) -> Optional[List[Any]]:
         """
         Returns all the data in the memory that is relevant to the given data.
         Args:

@@ -133,10 +127,12 @@ class RedisMemory(MemoryProviderSingleton):
         """
         query_embedding = get_ada_embedding(data)
         base_query = f"*=>[KNN {num_relevant} @embedding $vector AS vector_score]"
-        query = Query(base_query).return_fields(
-            "data",
-            "vector_score"
-        ).sort_by("vector_score").dialect(2)
+        query = (
+            Query(base_query)
+            .return_fields("data", "vector_score")
+            .sort_by("vector_score")
+            .dialect(2)
+        )
         query_vector = np.array(query_embedding).astype(np.float32).tobytes()

         try:
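Reviewer note: the shape of the RediSearch KNN query built above, as a hedged sketch; the live `search` call is left commented because it requires a running Redis Stack instance and a populated index:

```python
import numpy as np

num_relevant = 5
base_query = f"*=>[KNN {num_relevant} @embedding $vector AS vector_score]"
# The query vector must be serialized as raw float32 bytes, matching the
# FLOAT32/DIM 1536 schema declared for the "embedding" field.
query_vector = np.random.rand(1536).astype(np.float32).tobytes()
# results = redis_client.ft("auto-gpt").search(
#     Query(base_query).return_fields("data", "vector_score")
#     .sort_by("vector_score").dialect(2),
#     query_params={"vector": query_vector},
# )
```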
@@ -13,17 +13,35 @@ def get_prompt():
     prompt_generator = PromptGenerator()

     # Add constraints to the PromptGenerator object
-    prompt_generator.add_constraint("~4000 word limit for short term memory. Your short term memory is short, so immediately save important information to files.")
-    prompt_generator.add_constraint("If you are unsure how you previously did something or want to recall past events, thinking about similar events will help you remember.")
+    prompt_generator.add_constraint(
+        "~4000 word limit for short term memory. Your short term memory is short, so immediately save important information to files."
+    )
+    prompt_generator.add_constraint(
+        "If you are unsure how you previously did something or want to recall past events, thinking about similar events will help you remember."
+    )
     prompt_generator.add_constraint("No user assistance")
-    prompt_generator.add_constraint('Exclusively use the commands listed in double quotes e.g. "command name"')
+    prompt_generator.add_constraint(
+        'Exclusively use the commands listed in double quotes e.g. "command name"'
+    )

     # Define the command list
     commands = [
         ("Google Search", "google", {"input": "<search>"}),
-        ("Browse Website", "browse_website", {"url": "<url>", "question": "<what_you_want_to_find_on_website>"}),
-        ("Start GPT Agent", "start_agent", {"name": "<name>", "task": "<short_task_desc>", "prompt": "<prompt>"}),
-        ("Message GPT Agent", "message_agent", {"key": "<key>", "message": "<message>"}),
+        (
+            "Browse Website",
+            "browse_website",
+            {"url": "<url>", "question": "<what_you_want_to_find_on_website>"},
+        ),
+        (
+            "Start GPT Agent",
+            "start_agent",
+            {"name": "<name>", "task": "<short_task_desc>", "prompt": "<prompt>"},
+        ),
+        (
+            "Message GPT Agent",
+            "message_agent",
+            {"key": "<key>", "message": "<message>"},
+        ),
         ("List GPT Agents", "list_agents", {}),
         ("Delete GPT Agent", "delete_agent", {"key": "<key>"}),
         ("Write to file", "write_to_file", {"file": "<file>", "text": "<text>"}),

@@ -32,10 +50,22 @@ def get_prompt():
         ("Delete file", "delete_file", {"file": "<file>"}),
         ("Search Files", "search_files", {"directory": "<directory>"}),
         ("Evaluate Code", "evaluate_code", {"code": "<full_code_string>"}),
-        ("Get Improved Code", "improve_code", {"suggestions": "<list_of_suggestions>", "code": "<full_code_string>"}),
-        ("Write Tests", "write_tests", {"code": "<full_code_string>", "focus": "<list_of_focus_areas>"}),
+        (
+            "Get Improved Code",
+            "improve_code",
+            {"suggestions": "<list_of_suggestions>", "code": "<full_code_string>"},
+        ),
+        (
+            "Write Tests",
+            "write_tests",
+            {"code": "<full_code_string>", "focus": "<list_of_focus_areas>"},
+        ),
         ("Execute Python File", "execute_python_file", {"file": "<file>"}),
-        ("Execute Shell Command, non-interactive commands only", "execute_shell", { "command_line": "<command_line>"}),
+        (
+            "Execute Shell Command, non-interactive commands only",
+            "execute_shell",
+            {"command_line": "<command_line>"},
+        ),
         ("Task Complete (Shutdown)", "task_complete", {"reason": "<reason>"}),
         ("Generate Image", "generate_image", {"prompt": "<prompt>"}),
         ("Do Nothing", "do_nothing", {}),

@@ -46,16 +76,28 @@ def get_prompt():
         prompt_generator.add_command(command_label, command_name, args)

     # Add resources to the PromptGenerator object
-    prompt_generator.add_resource("Internet access for searches and information gathering.")
+    prompt_generator.add_resource(
+        "Internet access for searches and information gathering."
+    )
     prompt_generator.add_resource("Long Term memory management.")
-    prompt_generator.add_resource("GPT-3.5 powered Agents for delegation of simple tasks.")
+    prompt_generator.add_resource(
+        "GPT-3.5 powered Agents for delegation of simple tasks."
+    )
     prompt_generator.add_resource("File output.")

     # Add performance evaluations to the PromptGenerator object
-    prompt_generator.add_performance_evaluation("Continuously review and analyze your actions to ensure you are performing to the best of your abilities.")
-    prompt_generator.add_performance_evaluation("Constructively self-criticize your big-picture behavior constantly.")
-    prompt_generator.add_performance_evaluation("Reflect on past decisions and strategies to refine your approach.")
-    prompt_generator.add_performance_evaluation("Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.")
+    prompt_generator.add_performance_evaluation(
+        "Continuously review and analyze your actions to ensure you are performing to the best of your abilities."
+    )
+    prompt_generator.add_performance_evaluation(
+        "Constructively self-criticize your big-picture behavior constantly."
+    )
+    prompt_generator.add_performance_evaluation(
+        "Reflect on past decisions and strategies to refine your approach."
+    )
+    prompt_generator.add_performance_evaluation(
+        "Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps."
+    )

     # Generate the prompt string
     prompt_string = prompt_generator.generate_prompt_string()
@@ -20,14 +20,9 @@ class PromptGenerator:
             "reasoning": "reasoning",
             "plan": "- short bulleted\n- list that conveys\n- long-term plan",
             "criticism": "constructive self-criticism",
-            "speak": "thoughts summary to say to user"
+            "speak": "thoughts summary to say to user",
         },
-        "command": {
-            "name": "command name",
-            "args": {
-                "arg name": "value"
-            }
-        }
+        "command": {"name": "command name", "args": {"arg name": "value"}},
     }

     def add_constraint(self, constraint):

@@ -51,8 +46,7 @@ class PromptGenerator:
         if args is None:
             args = {}

-        command_args = {arg_key: arg_value for arg_key,
-                        arg_value in args.items()}
+        command_args = {arg_key: arg_value for arg_key, arg_value in args.items()}

         command = {
             "label": command_label,

@@ -72,8 +66,9 @@ class PromptGenerator:
         Returns:
             str: The formatted command string.
         """
-        args_string = ', '.join(
-            f'"{key}": "{value}"' for key, value in command['args'].items())
+        args_string = ", ".join(
+            f'"{key}": "{value}"' for key, value in command["args"].items()
+        )
         return f'{command["label"]}: "{command["name"]}", args: {args_string}'

     def add_resource(self, resource):

@@ -94,7 +89,7 @@ class PromptGenerator:
         """
         self.performance_evaluation.append(evaluation)

-    def _generate_numbered_list(self, items, item_type='list'):
+    def _generate_numbered_list(self, items, item_type="list"):
         """
         Generate a numbered list from given items based on the item_type.

@@ -105,8 +100,11 @@ class PromptGenerator:
         Returns:
             str: The formatted numbered list.
         """
-        if item_type == 'command':
-            return "\n".join(f"{i+1}. {self._generate_command_string(item)}" for i, item in enumerate(items))
+        if item_type == "command":
+            return "\n".join(
+                f"{i+1}. {self._generate_command_string(item)}"
+                for i, item in enumerate(items)
+            )
         else:
             return "\n".join(f"{i+1}. {item}" for i, item in enumerate(items))
@@ -1,12 +1,16 @@
 import os
-from playsound import playsound
+
 import requests
+from playsound import playsound

 from autogpt.config import Config

 cfg = Config()
-import gtts
 import threading
 from threading import Lock, Semaphore
+
+import gtts

 # Default voice IDs
 default_voices = ["ErXwobaYiN019PkySvjV", "EXAVITQu4vr4xnSDxMaL"]

@@ -19,26 +23,29 @@ placeholders = {"your-voice-id"}

 # Use custom voice IDs if provided and not placeholders, otherwise use default voice IDs
 voices = [
-    custom_voice_1 if custom_voice_1 and custom_voice_1 not in placeholders else default_voices[0],
-    custom_voice_2 if custom_voice_2 and custom_voice_2 not in placeholders else default_voices[1]
+    custom_voice_1
+    if custom_voice_1 and custom_voice_1 not in placeholders
+    else default_voices[0],
+    custom_voice_2
+    if custom_voice_2 and custom_voice_2 not in placeholders
+    else default_voices[1],
 ]

-tts_headers = {
-    "Content-Type": "application/json",
-    "xi-api-key": cfg.elevenlabs_api_key
-}
+tts_headers = {"Content-Type": "application/json", "xi-api-key": cfg.elevenlabs_api_key}

 mutex_lock = Lock()  # Ensure only one sound is played at a time
-queue_semaphore = Semaphore(1)  # The amount of sounds to queue before blocking the main thread
+queue_semaphore = Semaphore(
+    1
+)  # The amount of sounds to queue before blocking the main thread


 def eleven_labs_speech(text, voice_index=0):
     """Speak text using elevenlabs.io's API"""
     tts_url = "https://api.elevenlabs.io/v1/text-to-speech/{voice_id}".format(
-        voice_id=voices[voice_index])
+        voice_id=voices[voice_index]
+    )
     formatted_message = {"text": text}
-    response = requests.post(
-        tts_url, headers=tts_headers, json=formatted_message)
+    response = requests.post(tts_url, headers=tts_headers, json=formatted_message)

     if response.status_code == 200:
         with mutex_lock:

@@ -90,12 +97,11 @@ def macos_tts_speech(text, voice_index=0):


 def say_text(text, voice_index=0):
-
     def speak():
         if not cfg.elevenlabs_api_key:
-            if cfg.use_mac_os_tts == 'True':
+            if cfg.use_mac_os_tts == "True":
                 macos_tts_speech(text)
-            elif cfg.use_brian_tts == 'True':
+            elif cfg.use_brian_tts == "True":
                 success = brian_speech(text)
                 if not success:
                     gtts_speech(text)
@@ -1,14 +1,15 @@
+import itertools
 import sys
 import threading
-import itertools
 import time


 class Spinner:
     """A simple spinner class"""
+
     def __init__(self, message="Loading...", delay=0.1):
         """Initialize the spinner class"""
-        self.spinner = itertools.cycle(['-', '/', '|', '\\'])
+        self.spinner = itertools.cycle(["-", "/", "|", "\\"])
         self.delay = delay
         self.message = message
         self.running = False
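Reviewer note: the spinner's core animation, reduced to a runnable sketch without the threading wiring the real class uses:

```python
import itertools
import sys
import time

spinner = itertools.cycle(["-", "/", "|", "\\"])
for _ in range(20):  # spin briefly, then stop
    sys.stdout.write(next(spinner) + "\r")  # carriage return overwrites in place
    sys.stdout.flush()
    time.sleep(0.1)
```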
@@ -57,7 +57,7 @@ def split_text(text, max_length=8192):
 def create_message(chunk, question):
     return {
         "role": "user",
-        "content": f"\"\"\"{chunk}\"\"\" Using the above text, please answer the following question: \"{question}\" -- if the question cannot be answered using the text, please summarize the text."
+        "content": f'"""{chunk}""" Using the above text, please answer the following question: "{question}" -- if the question cannot be answered using the text, please summarize the text.',
     }
@@ -1,8 +1,11 @@
+from typing import Dict, List
+
 import tiktoken
-from typing import List, Dict


-def count_message_tokens(messages : List[Dict[str, str]], model : str = "gpt-3.5-turbo-0301") -> int:
+def count_message_tokens(
+    messages: List[Dict[str, str]], model: str = "gpt-3.5-turbo-0301"
+) -> int:
     """
     Returns the number of tokens used by a list of messages.

@@ -25,13 +28,17 @@ def count_message_tokens(messages : List[Dict[str, str]], model : str = "gpt-3.5
         # !Note: gpt-4 may change over time. Returning num tokens assuming gpt-4-0314.")
         return count_message_tokens(messages, model="gpt-4-0314")
     elif model == "gpt-3.5-turbo-0301":
-        tokens_per_message = 4  # every message follows <|start|>{role/name}\n{content}<|end|>\n
+        tokens_per_message = (
+            4  # every message follows <|start|>{role/name}\n{content}<|end|>\n
+        )
         tokens_per_name = -1  # if there's a name, the role is omitted
     elif model == "gpt-4-0314":
         tokens_per_message = 3
         tokens_per_name = 1
     else:
-        raise NotImplementedError(f"""num_tokens_from_messages() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens.""")
+        raise NotImplementedError(
+            f"""num_tokens_from_messages() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens."""
+        )
     num_tokens = 0
     for message in messages:
         num_tokens += tokens_per_message
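Reviewer note: for a single string (rather than a full message list), token counting with tiktoken boils down to encoding and measuring; `cl100k_base` is the encoding used by the gpt-3.5/gpt-4 family:

```python
import tiktoken

encoding = tiktoken.get_encoding("cl100k_base")
num_tokens = len(encoding.encode("Hello, how are you?"))
print(num_tokens)
```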
@@ -2,7 +2,7 @@ import yaml
 from colorama import Fore


-def clean_input(prompt: str=''):
+def clean_input(prompt: str = ""):
     try:
         return input(prompt)
     except KeyboardInterrupt:

@@ -18,6 +18,9 @@ def validate_yaml_file(file: str):
     except FileNotFoundError:
         return (False, f"The file {Fore.CYAN}`{file}`{Fore.RESET} wasn't found")
     except yaml.YAMLError as e:
-        return (False, f"There was an issue while trying to read with your AI Settings file: {e}")
+        return (
+            False,
+            f"There was an issue while trying to read with your AI Settings file: {e}",
+        )

     return (True, f"Successfully validated {Fore.CYAN}`{file}`{Fore.RESET}!")
@@ -15,6 +15,7 @@ import os
 import logging
 from pathlib import Path
+
 from autogpt.config import Config

 file_dir = Path(__file__).parent
 cfg = Config()

@@ -33,15 +34,19 @@ def browse_website(url, question):


 def scrape_text_with_selenium(url):
-    logging.getLogger('selenium').setLevel(logging.CRITICAL)
+    logging.getLogger("selenium").setLevel(logging.CRITICAL)

     options = Options()
-    options.add_argument('user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.5615.49 Safari/537.36')
-    driver = webdriver.Chrome(executable_path=ChromeDriverManager().install(), options=options)
+    options.add_argument(
+        "user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.5615.49 Safari/537.36"
+    )
+    driver = webdriver.Chrome(
+        executable_path=ChromeDriverManager().install(), options=options
+    )
     driver.get(url)

     WebDriverWait(driver, 10).until(
-        EC.presence_of_element_located((By.TAG_NAME, 'body'))
+        EC.presence_of_element_located((By.TAG_NAME, "body"))
     )

     # Get the HTML content directly from the browser's DOM

@@ -54,7 +59,7 @@ def scrape_text_with_selenium(url):
     text = soup.get_text()
     lines = (line.strip() for line in text.splitlines())
     chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
-    text = '\n'.join(chunk for chunk in chunks if chunk)
+    text = "\n".join(chunk for chunk in chunks if chunk)
     return driver, text

@@ -75,7 +80,7 @@ def close_browser(driver):


 def extract_hyperlinks(soup):
-    return [(link.text, link['href']) for link in soup.find_all('a', href=True)]
+    return [(link.text, link["href"]) for link in soup.find_all("a", href=True)]


 def format_hyperlinks(hyperlinks):
@@ -0,0 +1,11 @@
+[project]
+name = "auto-gpt"
+version = "0.1.0"
+description = "A GPT based ai agent"
+readme = "README.md"
+
+[tool.black]
+line-length = 88
+target-version = ['py310']
+include = '\.pyi?$'
+extend-exclude = ""
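Reviewer note: to preview the effect of the `[tool.black]` settings above, black can also be driven from Python; a hedged sketch, noting that black's internal API is not guaranteed stable across versions:

```python
import black

src = "def f(a,b):\n    return {'x':a,'y':b}\n"
# line_length=88 mirrors the pyproject.toml setting in this PR.
formatted = black.format_str(src, mode=black.Mode(line_length=88))
print(formatted)
# def f(a, b):
#     return {"x": a, "y": b}
```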
@@ -18,3 +18,7 @@ Pillow
 coverage
 flake8
 numpy
+pre-commit
+black
+sourcery
+isort
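Adding pre-commit, black, and isort to the requirements implies a hook configuration; the PR's actual .pre-commit-config.yaml is not visible in this view, so the following is only a sketch of the conventional setup (the rev pins are illustrative):

repos:
  - repo: https://github.com/pycqa/isort
    rev: 5.12.0  # illustrative pin
    hooks:
      - id: isort
  - repo: https://github.com/psf/black
    rev: 23.3.0  # illustrative pin
    hooks:
      - id: black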
@@ -1,9 +1,11 @@
-from colorama import init, Style
+from colorama import Style, init

 # Initialize colorama
 init(autoreset=True)

 # Use the bold ANSI style
-print(f"""{Style.BRIGHT}Please run:
+print(
+    f"""{Style.BRIGHT}Please run:
 python -m autogpt
-""")
+"""
+)
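init(autoreset=True) is what lets this shim use Style.BRIGHT without ever emitting an explicit reset; a two-line demonstration:

from colorama import Style, init

init(autoreset=True)  # styling resets automatically at the end of each print
print(f"{Style.BRIGHT}this line is bold")
print("this one is back to the default style")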
tests.py
@@ -2,7 +2,7 @@ import unittest

 if __name__ == "__main__":
     # Load all tests from the 'autogpt/tests' package
-    suite = unittest.defaultTestLoader.discover('autogpt/tests')
+    suite = unittest.defaultTestLoader.discover("autogpt/tests")

     # Run the tests
     unittest.TextTestRunner().run(suite)
@@ -1,5 +1,6 @@
-import sys
 import os
+import sys

-sys.path.insert(0, os.path.abspath(
-    os.path.join(os.path.dirname(__file__), '../scripts')))
+sys.path.insert(
+    0, os.path.abspath(os.path.join(os.path.dirname(__file__), "../scripts"))
+)
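That insert is the classic test-package path shim. An equivalent written with pathlib, shown only for comparison (not part of the PR):

import sys
from pathlib import Path

# Make ../scripts importable from the tests, mirroring the os.path version.
sys.path.insert(0, str((Path(__file__).parent / ".." / "scripts").resolve()))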
@@ -1,17 +1,16 @@
-import unittest
 import random
 import string
 import sys
+import unittest
 from pathlib import Path

 from autogpt.config import Config
 from autogpt.memory.local import LocalCache


 class TestLocalCache(unittest.TestCase):
     def random_string(self, length):
-        return ''.join(
-            random.choice(string.ascii_letters) for _ in range(length))
+        return "".join(random.choice(string.ascii_letters) for _ in range(length))

     def setUp(self):
         cfg = cfg = Config()
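Note that `cfg = cfg = Config()`, which this formatting-only hunk leaves untouched, is legal chained assignment even though the doubled target is almost certainly a typo; a standalone demonstration:

a = b = object()  # the expression is evaluated once, then bound to each target
print(a is b)     # True; with the same name twice, the second binding is redundant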
@@ -20,10 +19,10 @@ class TestLocalCache(unittest.TestCase):

         # Add example texts to the cache
         self.example_texts = [
-            'The quick brown fox jumps over the lazy dog',
-            'I love machine learning and natural language processing',
-            'The cake is a lie, but the pie is always true',
-            'ChatGPT is an advanced AI model for conversation'
+            "The quick brown fox jumps over the lazy dog",
+            "I love machine learning and natural language processing",
+            "The cake is a lie, but the pie is always true",
+            "ChatGPT is an advanced AI model for conversation",
         ]

         for text in self.example_texts:
@@ -46,5 +45,5 @@ class TestLocalCache(unittest.TestCase):
         self.assertIn(self.example_texts[1], relevant_texts)


-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
@@ -5,16 +5,19 @@ from autogpt.memory.local import LocalCache


 def MockConfig():
-    return type('MockConfig', (object,), {
-        'debug_mode': False,
-        'continuous_mode': False,
-        'speak_mode': False,
-        'memory_index': 'auto-gpt',
-    })
+    return type(
+        "MockConfig",
+        (object,),
+        {
+            "debug_mode": False,
+            "continuous_mode": False,
+            "speak_mode": False,
+            "memory_index": "auto-gpt",
+        },
+    )


 class TestLocalCache(unittest.TestCase):
-
     def setUp(self):
         self.cfg = MockConfig()
         self.cache = LocalCache(self.cfg)
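MockConfig relies on the three-argument form of type(), which builds a class object on the fly. Note that it returns the class itself, not an instance, and the test simply reads attributes off that class. A standalone illustration:

# type(name, bases, namespace) constructs and returns a new class object.
FakeCfg = type("FakeCfg", (object,), {"debug_mode": False, "memory_index": "auto-gpt"})
print(FakeCfg.memory_index)  # "auto-gpt" -- read straight off the class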
@@ -49,5 +52,5 @@ class TestLocalCache(unittest.TestCase):
         self.assertEqual(stats, (1, self.cache.data.embeddings.shape))


-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
@@ -1,14 +1,13 @@
 # Import the required libraries for unit testing
-import unittest
-import sys
 import os
+import sys
+import unittest

 from autogpt.promptgenerator import PromptGenerator


 # Create a test class for the PromptGenerator, subclassed from unittest.TestCase
 class promptgenerator_tests(unittest.TestCase):
-
     # Set up the initial state for each test method by creating an instance of PromptGenerator
     def setUp(self):
         self.generator = PromptGenerator()
@@ -67,7 +66,8 @@ class promptgenerator_tests(unittest.TestCase):
             self.generator.add_constraint(constraint)
         for command in commands:
             self.generator.add_command(
-                command["label"], command["name"], command["args"])
+                command["label"], command["name"], command["args"]
+            )
         for resource in resources:
             self.generator.add_resource(resource)
         for evaluation in evaluations:
@@ -95,5 +95,5 @@ class promptgenerator_tests(unittest.TestCase):


 # Run the tests when this script is executed
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
@@ -1,9 +1,9 @@
 import unittest
+
 from autogpt.config import Config


 class TestConfig(unittest.TestCase):
-
     def test_singleton(self):
         config1 = Config()
         config2 = Config()
@@ -55,5 +55,5 @@ class TestConfig(unittest.TestCase):
         self.assertTrue(config.debug_mode)


-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
@@ -1,6 +1,6 @@
 import unittest
-import tests.context

+import tests.context
 from autogpt.json_parser import fix_and_parse_json


@@ -53,21 +53,20 @@ class TestParseJson(unittest.TestCase):
         good_obj = {
             "command": {
                 "name": "browse_website",
-                "args": {
-                    "url": "https://github.com/Torantulino/Auto-GPT"
-                }
+                "args": {"url": "https://github.com/Torantulino/Auto-GPT"},
             },
-            "thoughts":
-            {
+            "thoughts": {
                 "text": "I suggest we start browsing the repository to find any issues that we can fix.",
                 "reasoning": "Browsing the repository will give us an idea of the current state of the codebase and identify any issues that we can address to improve the repo.",
                 "plan": "- Look through the repository to find any issues.\n- Investigate any issues to determine what needs to be fixed\n- Identify possible solutions to fix the issues\n- Open Pull Requests with fixes",
                 "criticism": "I should be careful while browsing so as not to accidentally introduce any new bugs or issues.",
-                "speak": "I will start browsing the repository to find any issues we can fix."
-            }
+                "speak": "I will start browsing the repository to find any issues we can fix.",
+            },
         }
         # Assert that this raises an exception:
-        self.assertEqual(fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj)
+        self.assertEqual(
+            fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj
+        )

     def test_invalid_json_leading_sentence_with_gpt(self):
         # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False
@@ -92,22 +91,21 @@ class TestParseJson(unittest.TestCase):
         good_obj = {
             "command": {
                 "name": "browse_website",
-                "args": {
-                    "url": "https://github.com/Torantulino/Auto-GPT"
-                }
+                "args": {"url": "https://github.com/Torantulino/Auto-GPT"},
             },
-            "thoughts":
-            {
+            "thoughts": {
                 "text": "Browsing the repository to identify potential bugs",
                 "reasoning": "Before fixing bugs, I need to identify what needs fixing. I will use the 'browse_website' command to analyze the repository.",
                 "plan": "- Analyze the repository for potential bugs and areas of improvement",
                 "criticism": "I need to ensure I am thorough and pay attention to detail while browsing the repository.",
-                "speak": "I am browsing the repository to identify potential bugs."
-            }
+                "speak": "I am browsing the repository to identify potential bugs.",
+            },
         }
         # Assert that this raises an exception:
-        self.assertEqual(fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj)
+        self.assertEqual(
+            fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj
+        )


-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
@@ -13,12 +13,18 @@ class TestParseJson(unittest.TestCase):
     def test_invalid_json_minor(self):
         # Test that an invalid JSON string can be fixed with gpt
         json_str = '{"name": "John", "age": 30, "city": "New York",}'
-        self.assertEqual(fix_and_parse_json(json_str, try_to_fix_with_gpt=False), {"name": "John", "age": 30, "city": "New York"})
+        self.assertEqual(
+            fix_and_parse_json(json_str, try_to_fix_with_gpt=False),
+            {"name": "John", "age": 30, "city": "New York"},
+        )

     def test_invalid_json_major_with_gpt(self):
         # Test that an invalid JSON string raises an error when try_to_fix_with_gpt is False
         json_str = 'BEGIN: "name": "John" - "age": 30 - "city": "New York" :END'
-        self.assertEqual(fix_and_parse_json(json_str, try_to_fix_with_gpt=True), {"name": "John", "age": 30, "city": "New York"})
+        self.assertEqual(
+            fix_and_parse_json(json_str, try_to_fix_with_gpt=True),
+            {"name": "John", "age": 30, "city": "New York"},
+        )

     def test_invalid_json_major_without_gpt(self):
         # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False
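The "minor" fixture is invalid only because of its trailing comma, which the stdlib parser rejects outright; that is the gap fix_and_parse_json has to bridge even before any GPT-assisted repair. A standalone check:

import json

json_str = '{"name": "John", "age": 30, "city": "New York",}'
try:
    json.loads(json_str)
except json.JSONDecodeError as exc:
    # The strict parser refuses the trailing comma; fix_and_parse_json
    # is expected to repair exactly this kind of near-miss JSON.
    print(f"stdlib json rejects it: {exc}")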
@@ -50,21 +56,20 @@ class TestParseJson(unittest.TestCase):
         good_obj = {
             "command": {
                 "name": "browse_website",
-                "args": {
-                    "url": "https://github.com/Torantulino/Auto-GPT"
-                }
+                "args": {"url": "https://github.com/Torantulino/Auto-GPT"},
             },
-            "thoughts":
-            {
+            "thoughts": {
                 "text": "I suggest we start browsing the repository to find any issues that we can fix.",
                 "reasoning": "Browsing the repository will give us an idea of the current state of the codebase and identify any issues that we can address to improve the repo.",
                 "plan": "- Look through the repository to find any issues.\n- Investigate any issues to determine what needs to be fixed\n- Identify possible solutions to fix the issues\n- Open Pull Requests with fixes",
                 "criticism": "I should be careful while browsing so as not to accidentally introduce any new bugs or issues.",
-                "speak": "I will start browsing the repository to find any issues we can fix."
-            }
+                "speak": "I will start browsing the repository to find any issues we can fix.",
+            },
         }
         # Assert that this raises an exception:
-        self.assertEqual(fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj)
+        self.assertEqual(
+            fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj
+        )

     def test_invalid_json_leading_sentence_with_gpt(self):
         # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False
@@ -89,22 +94,21 @@ class TestParseJson(unittest.TestCase):
         good_obj = {
             "command": {
                 "name": "browse_website",
-                "args": {
-                    "url": "https://github.com/Torantulino/Auto-GPT"
-                }
+                "args": {"url": "https://github.com/Torantulino/Auto-GPT"},
             },
-            "thoughts":
-            {
+            "thoughts": {
                 "text": "Browsing the repository to identify potential bugs",
                 "reasoning": "Before fixing bugs, I need to identify what needs fixing. I will use the 'browse_website' command to analyze the repository.",
                 "plan": "- Analyze the repository for potential bugs and areas of improvement",
                 "criticism": "I need to ensure I am thorough and pay attention to detail while browsing the repository.",
-                "speak": "I am browsing the repository to identify potential bugs."
-            }
+                "speak": "I am browsing the repository to identify potential bugs.",
+            },
         }
         # Assert that this raises an exception:
-        self.assertEqual(fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj)
+        self.assertEqual(
+            fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj
+        )


-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
@@ -1,4 +1,3 @@
-
 # Generated by CodiumAI

 # Dependencies:
@@ -39,7 +38,6 @@ requests and parse HTML content, respectively.


 class TestScrapeLinks:
-
     # Tests that the function returns a list of formatted hyperlinks when
     # provided with a valid url that returns a webpage with hyperlinks.
     def test_valid_url_with_hyperlinks(self):
@@ -54,8 +52,10 @@ class TestScrapeLinks:
         # Mock the requests.get() function to return a response with sample HTML containing hyperlinks
         mock_response = mocker.Mock()
         mock_response.status_code = 200
-        mock_response.text = "<html><body><a href='https://www.google.com'>Google</a></body></html>"
-        mocker.patch('requests.get', return_value=mock_response)
+        mock_response.text = (
+            "<html><body><a href='https://www.google.com'>Google</a></body></html>"
+        )
+        mocker.patch("requests.get", return_value=mock_response)

         # Call the function with a valid URL
         result = scrape_links("https://www.example.com")
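These tests lean on the pytest-mock `mocker` fixture; the pattern in miniature (a sketch, assuming pytest and pytest-mock are installed):

import requests

def test_patched_get(mocker):
    # Replace requests.get for the duration of this test only.
    mocker.patch(
        "requests.get", return_value=mocker.Mock(status_code=200, text="<html></html>")
    )
    assert requests.get("https://www.example.com").status_code == 200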
@@ -68,7 +68,7 @@ class TestScrapeLinks:
         # Mock the requests.get() function to return an HTTP error response
         mock_response = mocker.Mock()
         mock_response.status_code = 404
-        mocker.patch('requests.get', return_value=mock_response)
+        mocker.patch("requests.get", return_value=mock_response)

         # Call the function with an invalid URL
         result = scrape_links("https://www.invalidurl.com")
@@ -82,7 +82,7 @@ class TestScrapeLinks:
         mock_response = mocker.Mock()
         mock_response.status_code = 200
         mock_response.text = "<html><body><p>No hyperlinks here</p></body></html>"
-        mocker.patch('requests.get', return_value=mock_response)
+        mocker.patch("requests.get", return_value=mock_response)

         # Call the function with a URL containing no hyperlinks
         result = scrape_links("https://www.example.com")
@@ -105,7 +105,7 @@ class TestScrapeLinks:
         </body>
         </html>
         """
-        mocker.patch('requests.get', return_value=mock_response)
+        mocker.patch("requests.get", return_value=mock_response)

         # Call the function being tested
         result = scrape_links("https://www.example.com")
|
@ -1,4 +1,3 @@
|
|||
|
||||
# Generated by CodiumAI
|
||||
|
||||
import requests
|
||||
|
@@ -35,7 +34,6 @@ Additional aspects:


 class TestScrapeText:
-
     # Tests that scrape_text() returns the expected text when given a valid URL.
     def test_scrape_text_with_valid_url(self, mocker):
         # Mock the requests.get() method to return a response with expected text
@@ -74,7 +72,7 @@ class TestScrapeText:
     # Tests that the function returns an error message when the response status code is an http error (>=400).
    def test_http_error(self, mocker):
         # Mock the requests.get() method to return a response with a 404 status code
-        mocker.patch('requests.get', return_value=mocker.Mock(status_code=404))
+        mocker.patch("requests.get", return_value=mocker.Mock(status_code=404))

         # Call the function with a URL
         result = scrape_text("https://www.example.com")