parent b65b7acace
commit 4bb7a598a5

@@ -1,11 +1,8 @@
import argparse
import json
import logging
import random
import time
import traceback

import yaml
from colorama import Fore, Style

from autogpt import chat
@@ -16,10 +13,10 @@ from autogpt.config import Config
from autogpt.json_parser import fix_and_parse_json
from autogpt.logger import logger
from autogpt.memory import get_memory, get_supported_memory_backends
from autogpt.prompt import get_prompt
from autogpt.spinner import Spinner

cfg = Config()
config = None


def check_openai_api_key():
@@ -36,7 +33,8 @@ def check_openai_api_key():
def attempt_to_fix_json_by_finding_outermost_brackets(json_string):
    if cfg.speak_mode and cfg.debug_mode:
        speak.say_text(
            "I have received an invalid JSON response from the OpenAI API. Trying to fix it now."
            "I have received an invalid JSON response from the OpenAI API. "
            "Trying to fix it now."
        )
    logger.typewriter_log("Attempting to fix JSON by finding outermost brackets\n")
@@ -59,6 +57,8 @@ def attempt_to_fix_json_by_finding_outermost_brackets(json_string):
            raise ValueError("No valid JSON object found")

    except (json.JSONDecodeError, ValueError) as e:
        if cfg.debug_mode:
            logger.error("Error: Invalid JSON: %s\n", json_string)
        if cfg.speak_mode:
            speak.say_text("Didn't work. I will have to ignore this response then.")
        logger.error("Error: Invalid JSON, setting it to empty JSON now.\n")
@@ -82,7 +82,8 @@ def print_assistant_thoughts(assistant_reply):
        )
        assistant_reply_json = fix_and_parse_json(assistant_reply_json)

        # Check if assistant_reply_json is a string and attempt to parse it into a JSON object
        # Check if assistant_reply_json is a string and attempt to parse it into a
        # JSON object
        if isinstance(assistant_reply_json, str):
            try:
                assistant_reply_json = json.loads(assistant_reply_json)
@@ -108,9 +109,11 @@ def print_assistant_thoughts(assistant_reply):
        assistant_thoughts_speak = assistant_thoughts.get("speak")

        logger.typewriter_log(
            f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, assistant_thoughts_text
            f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, f"{assistant_thoughts_text}"
        )
        logger.typewriter_log(
            "REASONING:", Fore.YELLOW, f"{assistant_thoughts_reasoning}"
        )
        logger.typewriter_log("REASONING:", Fore.YELLOW, assistant_thoughts_reasoning)

        if assistant_thoughts_plan:
            logger.typewriter_log("PLAN:", Fore.YELLOW, "")
@@ -126,35 +129,40 @@ def print_assistant_thoughts(assistant_reply):
                line = line.lstrip("- ")
                logger.typewriter_log("- ", Fore.GREEN, line.strip())

        logger.typewriter_log("CRITICISM:", Fore.YELLOW, assistant_thoughts_criticism)
        logger.typewriter_log(
            "CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}"
        )
        # Speak the assistant's thoughts
        if cfg.speak_mode and assistant_thoughts_speak:
            speak.say_text(assistant_thoughts_speak)

        return assistant_reply_json
    except json.decoder.JSONDecodeError as e:
    except json.decoder.JSONDecodeError:
        call_stack = traceback.format_exc()
        logger.error("Error: Invalid JSON\n", assistant_reply)
        logger.error("Traceback: \n", call_stack)
        if cfg.speak_mode:
            speak.say_text(
                "I have received an invalid JSON response from the OpenAI API. I cannot ignore this response."
                "I have received an invalid JSON response from the OpenAI API."
                " I cannot ignore this response."
            )

    # All other errors, return "Error: + error message"
    except Exception as e:
    except Exception:
        call_stack = traceback.format_exc()
        logger.error("Error: \n", call_stack)


def construct_prompt():
    """Construct the prompt for the AI to respond to"""
    config = AIConfig.load(cfg.ai_settings_file)
    config: AIConfig = AIConfig.load(cfg.ai_settings_file)
    if cfg.skip_reprompt and config.ai_name:
        logger.typewriter_log("Name :", Fore.GREEN, config.ai_name)
        logger.typewriter_log("Role :", Fore.GREEN, config.ai_role)
        logger.typewriter_log("Goals:", Fore.GREEN, config.ai_goals)
        logger.typewriter_log("Goals:", Fore.GREEN, f"{config.ai_goals}")
    elif config.ai_name:
        logger.typewriter_log(
            f"Welcome back! ",
            "Welcome back! ",
            Fore.GREEN,
            f"Would you like me to return to being {config.ai_name}?",
            speak_text=True,
@@ -177,8 +185,7 @@ Continue (y/n): """
    global ai_name
    ai_name = config.ai_name

    full_prompt = config.construct_full_prompt()
    return full_prompt
    return config.construct_full_prompt()


def prompt_user():
@@ -188,7 +195,8 @@ def prompt_user():
    logger.typewriter_log(
        "Welcome to Auto-GPT! ",
        Fore.GREEN,
        "Enter the name of your AI and its role below. Entering nothing will load defaults.",
        "Enter the name of your AI and its role below. Entering nothing will load"
        " defaults.",
        speak_text=True,
    )

@@ -208,17 +216,20 @@ def prompt_user():
    logger.typewriter_log(
        "Describe your AI's role: ",
        Fore.GREEN,
        "For example, 'an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth.'",
        "For example, 'an AI designed to autonomously develop and run businesses with"
        " the sole goal of increasing your net worth.'",
    )
    ai_role = utils.clean_input(f"{ai_name} is: ")
    if ai_role == "":
        ai_role = "an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth."
        ai_role = "an AI designed to autonomously develop and run businesses with the"
        " sole goal of increasing your net worth."

    # Enter up to 5 goals for the AI
    logger.typewriter_log(
        "Enter up to 5 goals for your AI: ",
        Fore.GREEN,
        "For example: \nIncrease net worth, Grow Twitter Account, Develop and manage multiple businesses autonomously'",
        "For example: \nIncrease net worth, Grow Twitter Account, Develop and manage"
        " multiple businesses autonomously'",
    )
    print("Enter nothing to load defaults, enter nothing when finished.", flush=True)
    ai_goals = []
@@ -279,7 +290,8 @@ def parse_arguments():
        "--ai-settings",
        "-C",
        dest="ai_settings_file",
        help="Specifies which ai_settings.yaml file to use, will also automatically skip the re-prompt.",
        help="Specifies which ai_settings.yaml file to use, will also automatically"
        " skip the re-prompt.",
    )
    args = parser.parse_args()

@@ -292,7 +304,9 @@ def parse_arguments():
        logger.typewriter_log(
            "WARNING: ",
            Fore.RED,
            "Continuous mode is not recommended. It is potentially dangerous and may cause your AI to run forever or carry out actions you would not usually authorise. Use at your own risk.",
            "Continuous mode is not recommended. It is potentially dangerous and may"
            " cause your AI to run forever or carry out actions you would not usually"
            " authorise. Use at your own risk.",
        )
        cfg.set_continuous_mode(True)

@@ -327,7 +341,7 @@ def parse_arguments():
            Fore.RED,
            f"{supported_memory}",
        )
        logger.typewriter_log(f"Defaulting to: ", Fore.YELLOW, cfg.memory_backend)
        logger.typewriter_log("Defaulting to: ", Fore.YELLOW, cfg.memory_backend)
    else:
        cfg.memory_backend = chosen

@@ -361,14 +375,16 @@ def main():
    # print(prompt)
    # Initialize variables
    full_message_history = []
    result = None
    next_action_count = 0
    # Make a constant:
    user_input = "Determine which next command to use, and respond using the format specified above:"
    user_input = (
        "Determine which next command to use, and respond using the"
        " format specified above:"
    )
    # Initialize memory and make sure it is empty.
    # this is particularly important for indexing and referencing pinecone memory
    memory = get_memory(cfg, init=True)
    print("Using memory of type: " + memory.__class__.__name__)
    print(f"Using memory of type: {memory.__class__.__name__}")
    agent = Agent(
        ai_name=ai_name,
        memory=memory,
@@ -412,6 +428,8 @@ class Agent:
    def start_interaction_loop(self):
        # Interaction Loop
        loop_count = 0
        command_name = None
        arguments = None
        while True:
            # Discontinue if continuous limit is reached
            loop_count += 1
@@ -456,10 +474,13 @@ class Agent:
            logger.typewriter_log(
                "NEXT ACTION: ",
                Fore.CYAN,
                f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
                f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL}"
                f" ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
            )
            print(
                f"Enter 'y' to authorise command, 'y -N' to run N continuous commands, 'n' to exit program, or enter feedback for {self.ai_name}...",
                "Enter 'y' to authorise command, 'y -N' to run N continuous"
                " commands, 'n' to exit program, or enter feedback for"
                f" {self.ai_name}...",
                flush=True,
            )
            while True:
@@ -477,7 +498,8 @@ class Agent:
                        self.user_input = "GENERATE NEXT COMMAND JSON"
                    except ValueError:
                        print(
                            "Invalid input format. Please enter 'y -n' where n is the number of continuous tasks."
                            "Invalid input format. Please enter 'y -n' where n"
                            " is the number of continuous tasks."
                        )
                        continue
                    break
@@ -503,18 +525,22 @@ class Agent:
            logger.typewriter_log(
                "NEXT ACTION: ",
                Fore.CYAN,
                f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
                f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL}"
                f" ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
            )

            # Execute command
            if command_name is not None and command_name.lower().startswith("error"):
                result = (
                    f"Command {command_name} threw the following error: " + arguments
                    f"Command {command_name} threw the following error: {arguments}"
                )
            elif command_name == "human_feedback":
                result = f"Human feedback: {self.user_input}"
            else:
                result = f"Command {command_name} returned: {cmd.execute_command(command_name, arguments)}"
                result = (
                    f"Command {command_name} "
                    f"returned: {cmd.execute_command(command_name, arguments)}"
                )
            if self.next_action_count > 0:
                self.next_action_count -= 1

@@ -1,15 +1,17 @@
import json
import regex
import traceback
from tkinter.ttk import Style

from colorama import Fore
from colorama import Fore, Style

import autogpt.chat
from autogpt.chat import chat_with_ai, create_chat_message
import autogpt.commands as cmd
import autogpt.speak
from autogpt.config import Config
from autogpt.json_parser import fix_and_parse_json
from autogpt.logger import logger
from autogpt.speak import say_text
from autogpt.spinner import Spinner
from autogpt.utils import clean_input


class Agent:
@@ -45,6 +47,8 @@ class Agent:
        # Interaction Loop
        cfg = Config()
        loop_count = 0
        command_name = None
        arguments = None
        while True:
            # Discontinue if continuous limit is reached
            loop_count += 1
@@ -60,7 +64,7 @@ class Agent:

            # Send message to AI, get response
            with Spinner("Thinking... "):
                assistant_reply = chat.chat_with_ai(
                assistant_reply = chat_with_ai(
                    self.prompt,
                    self.user_input,
                    self.full_message_history,
@@ -77,7 +81,7 @@ class Agent:
                    attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply)
                )
                if cfg.speak_mode:
                    speak.say_text(f"I want to execute {command_name}")
                    say_text(f"I want to execute {command_name}")
            except Exception as e:
                logger.error("Error: \n", str(e))

@@ -89,14 +93,17 @@ class Agent:
                logger.typewriter_log(
                    "NEXT ACTION: ",
                    Fore.CYAN,
                    f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
                    f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} "
                    f"ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
                )
                print(
                    f"Enter 'y' to authorise command, 'y -N' to run N continuous commands, 'n' to exit program, or enter feedback for {self.ai_name}...",
                    "Enter 'y' to authorise command, 'y -N' to run N continuous "
                    "commands, 'n' to exit program, or enter feedback for "
                    f"{self.ai_name}...",
                    flush=True,
                )
                while True:
                    console_input = utils.clean_input(
                    console_input = clean_input(
                        Fore.MAGENTA + "Input:" + Style.RESET_ALL
                    )
                    if console_input.lower().rstrip() == "y":
@@ -110,7 +117,8 @@ class Agent:
                            self.user_input = "GENERATE NEXT COMMAND JSON"
                        except ValueError:
                            print(
                                "Invalid input format. Please enter 'y -n' where n is the number of continuous tasks."
                                "Invalid input format. Please enter 'y -n' where n is"
                                " the number of continuous tasks."
                            )
                            continue
                        break
@@ -136,18 +144,22 @@ class Agent:
            logger.typewriter_log(
                "NEXT ACTION: ",
                Fore.CYAN,
                f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
                f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL}"
                f" ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
            )

            # Execute command
            if command_name is not None and command_name.lower().startswith("error"):
                result = (
                    f"Command {command_name} threw the following error: " + arguments
                    f"Command {command_name} threw the following error: {arguments}"
                )
            elif command_name == "human_feedback":
                result = f"Human feedback: {self.user_input}"
            else:
                result = f"Command {command_name} returned: {cmd.execute_command(command_name, arguments)}"
                result = (
                    f"Command {command_name} returned: "
                    f"{cmd.execute_command(command_name, arguments)}"
                )
            if self.next_action_count > 0:
                self.next_action_count -= 1

@@ -162,13 +174,11 @@ class Agent:
            # Check if there's a result from the command append it to the message
            # history
            if result is not None:
                self.full_message_history.append(
                    chat.create_chat_message("system", result)
                )
                self.full_message_history.append(create_chat_message("system", result))
                logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result)
            else:
                self.full_message_history.append(
                    chat.create_chat_message("system", "Unable to execute command")
                    create_chat_message("system", "Unable to execute command")
                )
                logger.typewriter_log(
                    "SYSTEM: ", Fore.YELLOW, "Unable to execute command"
@@ -178,15 +188,13 @@ class Agent:
def attempt_to_fix_json_by_finding_outermost_brackets(json_string):
    cfg = Config()
    if cfg.speak_mode and cfg.debug_mode:
        speak.say_text(
            "I have received an invalid JSON response from the OpenAI API. Trying to fix it now."
        say_text(
            "I have received an invalid JSON response from the OpenAI API. "
            "Trying to fix it now."
        )
    logger.typewriter_log("Attempting to fix JSON by finding outermost brackets\n")

    try:
        # Use regex to search for JSON objects
        import regex

        json_pattern = regex.compile(r"\{(?:[^{}]|(?R))*\}")
        json_match = json_pattern.search(json_string)

@@ -197,40 +205,40 @@ def attempt_to_fix_json_by_finding_outermost_brackets(json_string):
                title="Apparently json was fixed.", title_color=Fore.GREEN
            )
            if cfg.speak_mode and cfg.debug_mode:
                speak.say_text("Apparently json was fixed.")
                say_text("Apparently json was fixed.")
        else:
            raise ValueError("No valid JSON object found")

    except (json.JSONDecodeError, ValueError) as e:
    except (json.JSONDecodeError, ValueError):
        if cfg.speak_mode:
            speak.say_text("Didn't work. I will have to ignore this response then.")
            say_text("Didn't work. I will have to ignore this response then.")
        logger.error("Error: Invalid JSON, setting it to empty JSON now.\n")
        json_string = {}

    return json_string


def print_assistant_thoughts(assistant_reply):
def print_assistant_thoughts(ai_name, assistant_reply):
    """Prints the assistant's thoughts to the console"""
    global ai_name
    global cfg
    cfg = Config()
    try:
        try:
            # Parse and print Assistant response
            assistant_reply_json = fix_and_parse_json(assistant_reply)
        except json.JSONDecodeError as e:
        except json.JSONDecodeError:
            logger.error("Error: Invalid JSON in assistant thoughts\n", assistant_reply)
            assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets(
                assistant_reply
            )
            assistant_reply_json = fix_and_parse_json(assistant_reply_json)
            if isinstance(assistant_reply_json, str):
                assistant_reply_json = fix_and_parse_json(assistant_reply_json)

        # Check if assistant_reply_json is a string and attempt to parse it into a JSON object
        # Check if assistant_reply_json is a string and attempt to parse
        # it into a JSON object
        if isinstance(assistant_reply_json, str):
            try:
                assistant_reply_json = json.loads(assistant_reply_json)
            except json.JSONDecodeError as e:
            except json.JSONDecodeError:
                logger.error("Error: Invalid JSON\n", assistant_reply)
                assistant_reply_json = (
                    attempt_to_fix_json_by_finding_outermost_brackets(
@@ -242,6 +250,8 @@ def print_assistant_thoughts(assistant_reply):
        assistant_thoughts_plan = None
        assistant_thoughts_speak = None
        assistant_thoughts_criticism = None
        if not isinstance(assistant_reply_json, dict):
            assistant_reply_json = {}
        assistant_thoughts = assistant_reply_json.get("thoughts", {})
        assistant_thoughts_text = assistant_thoughts.get("text")

@@ -252,9 +262,11 @@ def print_assistant_thoughts(assistant_reply):
        assistant_thoughts_speak = assistant_thoughts.get("speak")

        logger.typewriter_log(
            f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, assistant_thoughts_text
            f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, f"{assistant_thoughts_text}"
        )
        logger.typewriter_log(
            "REASONING:", Fore.YELLOW, f"{assistant_thoughts_reasoning}"
        )
        logger.typewriter_log("REASONING:", Fore.YELLOW, assistant_thoughts_reasoning)

        if assistant_thoughts_plan:
            logger.typewriter_log("PLAN:", Fore.YELLOW, "")
@@ -270,20 +282,23 @@ def print_assistant_thoughts(assistant_reply):
                line = line.lstrip("- ")
                logger.typewriter_log("- ", Fore.GREEN, line.strip())

        logger.typewriter_log("CRITICISM:", Fore.YELLOW, assistant_thoughts_criticism)
        logger.typewriter_log(
            "CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}"
        )
        # Speak the assistant's thoughts
        if cfg.speak_mode and assistant_thoughts_speak:
            speak.say_text(assistant_thoughts_speak)
            say_text(assistant_thoughts_speak)

        return assistant_reply_json
    except json.decoder.JSONDecodeError as e:
    except json.decoder.JSONDecodeError:
        logger.error("Error: Invalid JSON\n", assistant_reply)
        if cfg.speak_mode:
            speak.say_text(
                "I have received an invalid JSON response from the OpenAI API. I cannot ignore this response."
            say_text(
                "I have received an invalid JSON response from the OpenAI API."
                " I cannot ignore this response."
            )

    # All other errors, return "Error: + error message"
    except Exception as e:
    except Exception:
        call_stack = traceback.format_exc()
        logger.error("Error: \n", call_stack)

@@ -1,5 +1,5 @@
import os

from typing import Type
import yaml

from autogpt.prompt import get_prompt
@@ -37,14 +37,16 @@ class AIConfig:
    SAVE_FILE = os.path.join(os.path.dirname(__file__), "..", "ai_settings.yaml")

    @classmethod
    def load(cls: object, config_file: str = SAVE_FILE) -> object:
    def load(cls: "Type[AIConfig]", config_file: str = SAVE_FILE) -> "Type[AIConfig]":
        """
        Returns class object with parameters (ai_name, ai_role, ai_goals) loaded from yaml file if yaml file exists,
        Returns class object with parameters (ai_name, ai_role, ai_goals) loaded from
          yaml file if yaml file exists,
        else returns class with no parameters.

        Parameters:
           cls (class object): An AIConfig Class object.
           config_file (int): The path to the config yaml file. DEFAULT: "../ai_settings.yaml"
           config_file (int): The path to the config yaml file.
             DEFAULT: "../ai_settings.yaml"

        Returns:
            cls (object): An instance of given cls object
@@ -59,7 +61,7 @@ class AIConfig:
        ai_name = config_params.get("ai_name", "")
        ai_role = config_params.get("ai_role", "")
        ai_goals = config_params.get("ai_goals", [])

        # type: Type[AIConfig]
        return cls(ai_name, ai_role, ai_goals)

    def save(self, config_file: str = SAVE_FILE) -> None:
@@ -67,7 +69,8 @@ class AIConfig:
        Saves the class parameters to the specified file yaml file path as a yaml file.

        Parameters:
            config_file(str): The path to the config yaml file. DEFAULT: "../ai_settings.yaml"
            config_file(str): The path to the config yaml file.
              DEFAULT: "../ai_settings.yaml"

        Returns:
            None
@@ -89,10 +92,16 @@ class AIConfig:
            None

        Returns:
            full_prompt (str): A string containing the initial prompt for the user including the ai_name, ai_role and ai_goals.
            full_prompt (str): A string containing the initial prompt for the user
              including the ai_name, ai_role and ai_goals.
        """

        prompt_start = """Your decisions must always be made independently without seeking user assistance. Play to your strengths as an LLM and pursue simple strategies with no legal complications."""
        prompt_start = (
            "Your decisions must always be made independently without"
            "seeking user assistance. Play to your strengths as an LLM and pursue"
            " simple strategies with no legal complications."
            ""
        )

        # Construct full prompt
        full_prompt = (

@@ -9,26 +9,29 @@ cfg = Config()

def evaluate_code(code: str) -> List[str]:
    """
    A function that takes in a string and returns a response from create chat completion api call.
    A function that takes in a string and returns a response from create chat
      completion api call.

    Parameters:
        code (str): Code to be evaluated.
    Returns:
        A result string from create chat completion. A list of suggestions to improve the code.
        A result string from create chat completion. A list of suggestions to
          improve the code.
    """

    function_string = "def analyze_code(code: str) -> List[str]:"
    args = [code]
    description_string = """Analyzes the given code and returns a list of suggestions for improvements."""
    description_string = (
        "Analyzes the given code and returns a list of suggestions" " for improvements."
    )

    result_string = call_ai_function(function_string, args, description_string)

    return result_string
    return call_ai_function(function_string, args, description_string)


def improve_code(suggestions: List[str], code: str) -> str:
    """
    A function that takes in code and suggestions and returns a response from create chat completion api call.
    A function that takes in code and suggestions and returns a response from create
      chat completion api call.

    Parameters:
        suggestions (List): A list of suggestions around what needs to be improved.
@@ -41,28 +44,34 @@ def improve_code(suggestions: List[str], code: str) -> str:
        "def generate_improved_code(suggestions: List[str], code: str) -> str:"
    )
    args = [json.dumps(suggestions), code]
    description_string = """Improves the provided code based on the suggestions provided, making no other changes."""
    description_string = (
        "Improves the provided code based on the suggestions"
        " provided, making no other changes."
    )

    result_string = call_ai_function(function_string, args, description_string)
    return result_string
    return call_ai_function(function_string, args, description_string)


def write_tests(code: str, focus: List[str]) -> str:
    """
    A function that takes in code and focus topics and returns a response from create chat completion api call.
    A function that takes in code and focus topics and returns a response from create
      chat completion api call.

    Parameters:
        focus (List): A list of suggestions around what needs to be improved.
        code (str): Code for test cases to be generated against.
    Returns:
        A result string from create chat completion. Test cases for the submitted code in response.
        A result string from create chat completion. Test cases for the submitted code
          in response.
    """

    function_string = (
        "def create_test_cases(code: str, focus: Optional[str] = None) -> str:"
    )
    args = [code, json.dumps(focus)]
    description_string = """Generates test cases for the existing code, focusing on specific areas if required."""
    description_string = (
        "Generates test cases for the existing code, focusing on"
        " specific areas if required."
    )

    result_string = call_ai_function(function_string, args, description_string)
    return result_string
    return call_ai_function(function_string, args, description_string)

@@ -63,7 +63,8 @@ def get_response(url, timeout=10):
        return None, "Error: " + str(ve)

    except requests.exceptions.RequestException as re:
        # Handle exceptions related to the HTTP request (e.g., connection errors, timeouts, etc.)
        # Handle exceptions related to the HTTP request
        # (e.g., connection errors, timeouts, etc.)
        return None, "Error: " + str(re)


@@ -72,6 +73,8 @@ def scrape_text(url):
    response, error_message = get_response(url)
    if error_message:
        return error_message
    if not response:
        return "Error: Could not get response"

    soup = BeautifulSoup(response.text, "html.parser")

@@ -107,7 +110,8 @@ def scrape_links(url):
    response, error_message = get_response(url)
    if error_message:
        return error_message

    if not response:
        return "Error: Could not get response"
    soup = BeautifulSoup(response.text, "html.parser")

    for script in soup(["script", "style"]):
@@ -141,7 +145,9 @@ def create_message(chunk, question):
    """Create a message for the user to summarize a chunk of text"""
    return {
        "role": "user",
        "content": f'"""{chunk}""" Using the above text, please answer the following question: "{question}" -- if the question cannot be answered using the text, please summarize the text.',
        "content": f'"""{chunk}""" Using the above text, please answer the following'
        f' question: "{question}" -- if the question cannot be answered using the'
        " text, please summarize the text.",
    }

@@ -1,13 +1,12 @@
from autogpt.config import Config
from autogpt.llm_utils import create_chat_completion

cfg = Config()

from autogpt.llm_utils import create_chat_completion


# This is a magic function that can do anything with no-code. See
# https://github.com/Torantulino/AI-Functions for more info.
def call_ai_function(function, args, description, model=None):
def call_ai_function(function, args, description, model=None) -> str:
    """Call an AI function"""
    if model is None:
        model = cfg.smart_llm_model
@@ -18,11 +17,10 @@ def call_ai_function(function, args, description, model=None):
    messages = [
        {
            "role": "system",
            "content": f"You are now the following python function: ```# {description}\n{function}```\n\nOnly respond with your `return` value.",
            "content": f"You are now the following python function: ```# {description}"
            f"\n{function}```\n\nOnly respond with your `return` value.",
        },
        {"role": "user", "content": args},
    ]

    response = create_chat_completion(model=model, messages=messages, temperature=0)

    return response
    return create_chat_completion(model=model, messages=messages, temperature=0)

@@ -1,8 +1,6 @@
import logging
import time

import openai
from dotenv import load_dotenv
from openai.error import RateLimitError

from autogpt import token_counter
from autogpt.config import Config
@@ -55,18 +53,22 @@ def generate_context(prompt, relevant_memory, full_message_history, model):
def chat_with_ai(
    prompt, user_input, full_message_history, permanent_memory, token_limit
):
    """Interact with the OpenAI API, sending the prompt, user input, message history, and permanent memory."""
    """Interact with the OpenAI API, sending the prompt, user input, message history,
    and permanent memory."""
    while True:
        try:
            """
            Interact with the OpenAI API, sending the prompt, user input, message history, and permanent memory.
            Interact with the OpenAI API, sending the prompt, user input,
            message history, and permanent memory.

            Args:
                prompt (str): The prompt explaining the rules to the AI.
                user_input (str): The input from the user.
                full_message_history (list): The list of all messages sent between the user and the AI.
                permanent_memory (Obj): The memory object containing the permanent memory.
                token_limit (int): The maximum number of tokens allowed in the API call.
                prompt (str): The prompt explaining the rules to the AI.
                user_input (str): The input from the user.
                full_message_history (list): The list of all messages sent between the
                    user and the AI.
                permanent_memory (Obj): The memory object containing the permanent
                    memory.
                token_limit (int): The maximum number of tokens allowed in the API call.

            Returns:
                str: The AI's response.
@@ -118,7 +120,8 @@ def chat_with_ai(
                if current_tokens_used + tokens_to_add > send_token_limit:
                    break

                # Add the most recent message to the start of the current context, after the two system prompts.
                # Add the most recent message to the start of the current context,
                # after the two system prompts.
                current_context.insert(
                    insertion_index, full_message_history[next_message_to_add_index]
                )
@@ -134,7 +137,9 @@ def chat_with_ai(

            # Calculate remaining tokens
            tokens_remaining = token_limit - current_tokens_used
            # assert tokens_remaining >= 0, "Tokens remaining is negative. This should never happen, please submit a bug report at https://www.github.com/Torantulino/Auto-GPT"
            # assert tokens_remaining >= 0, "Tokens remaining is negative.
            # This should never happen, please submit a bug report at
            # https://www.github.com/Torantulino/Auto-GPT"

            # Debug print the current context
            logger.debug(f"Token limit: {token_limit}")
@@ -149,7 +154,8 @@ def chat_with_ai(
            logger.debug("")
            logger.debug("----------- END OF CONTEXT ----------------")

            # TODO: use a model defined elsewhere, so that model can contain temperature and other settings we care about
            # TODO: use a model defined elsewhere, so that model can contain
            # temperature and other settings we care about
            assistant_reply = create_chat_completion(
                model=model,
                messages=current_context,
@@ -163,7 +169,7 @@ def chat_with_ai(
            )

            return assistant_reply
        except openai.error.RateLimitError:
        except RateLimitError:
            # TODO: When we switch to langchain, this is built in
            print("Error: ", "API Rate Limit Reached. Waiting 10 seconds...")
            time.sleep(10)

@@ -1,23 +1,29 @@
from autogpt import browse
import json
from autogpt.memory import get_memory
import datetime
import autogpt.agent_manager as agents
from autogpt import speak
from autogpt.config import Config
import autogpt.ai_functions as ai
from autogpt.file_operations import read_file, write_to_file, append_to_file, delete_file, search_files
from autogpt.execute_code import execute_python_file, execute_shell
from autogpt.json_parser import fix_and_parse_json
from autogpt.image_gen import generate_image
from duckduckgo_search import ddg
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from autogpt.ai_functions import evaluate_code, improve_code, write_tests
from autogpt.browse import scrape_links, scrape_text, summarize_text
from autogpt.execute_code import execute_python_file, execute_shell
from autogpt.file_operations import (
    append_to_file,
    delete_file,
    read_file,
    search_files,
    write_to_file,
)
from autogpt.memory import get_memory
from autogpt.speak import say_text
from autogpt.web import browse_website


cfg = Config()


def is_valid_int(value):
def is_valid_int(value) -> bool:
    try:
        int(value)
        return True
@@ -33,7 +39,12 @@ def get_command(response):
        if "command" not in response_json:
            return "Error:", "Missing 'command' object in JSON"

        if not isinstance(response_json, dict):
            return "Error:", f"'response_json' object is not dictionary {response_json}"

        command = response_json["command"]
        if not isinstance(command, dict):
            return "Error:", "'command' object is not a dictionary"

        if "name" not in command:
            return "Error:", "Missing 'name' field in 'command' object"
@@ -58,7 +69,8 @@ def execute_command(command_name, arguments):
    try:
        if command_name == "google":
            # Check if the Google API key is set and use the official search method
            # If the API key is not set or has only whitespaces, use the unofficial search method
            # If the API key is not set or has only whitespaces, use the unofficial
            # search method
            key = cfg.google_api_key
            if key and key.strip() and key != "your-google-api-key":
                return google_official_search(arguments["input"])
@@ -96,18 +108,22 @@ def execute_command(command_name, arguments):
        # non-file is given, return instructions "Input should be a python
        # filepath, write your code to file and try again"
        elif command_name == "evaluate_code":
            return ai.evaluate_code(arguments["code"])
            return evaluate_code(arguments["code"])
        elif command_name == "improve_code":
            return ai.improve_code(arguments["suggestions"], arguments["code"])
            return improve_code(arguments["suggestions"], arguments["code"])
        elif command_name == "write_tests":
            return ai.write_tests(arguments["code"], arguments.get("focus"))
            return write_tests(arguments["code"], arguments.get("focus"))
        elif command_name == "execute_python_file":  # Add this command
            return execute_python_file(arguments["file"])
        elif command_name == "execute_shell":
            if cfg.execute_local_commands:
                return execute_shell(arguments["command_line"])
            else:
                return "You are not allowed to run local shell commands. To execute shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' in your config. Do not attempt to bypass the restriction."
                return (
                    "You are not allowed to run local shell commands. To execute"
                    " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' "
                    "in your config. Do not attempt to bypass the restriction."
                )
        elif command_name == "generate_image":
            return generate_image(arguments["prompt"])
        elif command_name == "do_nothing":
@@ -115,7 +131,11 @@ def execute_command(command_name, arguments):
        elif command_name == "task_complete":
            shutdown()
        else:
            return f"Unknown command '{command_name}'. Please refer to the 'COMMANDS' list for available commands and only respond in the specified JSON format."
            return (
                f"Unknown command '{command_name}'. Please refer to the 'COMMANDS'"
                " list for available commands and only respond in the specified JSON"
                " format."
            )
    # All errors, return "Error: + error message"
    except Exception as e:
        return "Error: " + str(e)
@@ -131,6 +151,9 @@ def get_datetime():
def google_search(query, num_results=8):
    """Return the results of a google search"""
    search_results = []
    if not query:
        return json.dumps(search_results)

    for j in ddg(query, max_results=num_results):
        search_results.append(j)

@@ -185,61 +208,14 @@ def google_official_search(query, num_results=8):

def get_text_summary(url, question):
    """Return the results of a google search"""
    text = browse.scrape_text(url)
    summary = browse.summarize_text(url, text, question)
    text = scrape_text(url)
    summary = summarize_text(url, text, question)
    return """ "Result" : """ + summary


def get_hyperlinks(url):
    """Return the results of a google search"""
    link_list = browse.scrape_links(url)
    return link_list


def commit_memory(string):
    """Commit a string to memory"""
    _text = f"""Committing memory with string "{string}" """
    mem.permanent_memory.append(string)
    return _text


def delete_memory(key):
    """Delete a memory with a given key"""
    if key >= 0 and key < len(mem.permanent_memory):
        _text = "Deleting memory with key " + str(key)
        del mem.permanent_memory[key]
        print(_text)
        return _text
    else:
        print("Invalid key, cannot delete memory.")
        return None


def overwrite_memory(key, string):
    """Overwrite a memory with a given key and string"""
    # Check if the key is a valid integer
    if is_valid_int(key):
        key_int = int(key)
        # Check if the integer key is within the range of the permanent_memory list
        if 0 <= key_int < len(mem.permanent_memory):
            _text = "Overwriting memory with key " + str(key) + " and string " + string
            # Overwrite the memory slot with the given integer key and string
            mem.permanent_memory[key_int] = string
            print(_text)
            return _text
        else:
            print(f"Invalid key '{key}', out of range.")
            return None
    # Check if the key is a valid string
    elif isinstance(key, str):
        _text = "Overwriting memory with key " + key + " and string " + string
        # Overwrite the memory slot with the given string key and string
        mem.permanent_memory[key] = string
        print(_text)
        return _text
    else:
        print(f"Invalid key '{key}', must be an integer or a string.")
        return None
    return scrape_links(url)


def shutdown():
@@ -250,8 +226,6 @@ def shutdown():

def start_agent(name, task, prompt, model=cfg.fast_llm_model):
    """Start an agent with a given name, task, and prompt"""
    global cfg

    # Remove underscores from name
    voice_name = name.replace("_", " ")

@@ -260,22 +234,20 @@ def start_agent(name, task, prompt, model=cfg.fast_llm_model):

    # Create agent
    if cfg.speak_mode:
        speak.say_text(agent_intro, 1)
        say_text(agent_intro, 1)
    key, ack = agents.create_agent(task, first_message, model)

    if cfg.speak_mode:
        speak.say_text(f"Hello {voice_name}. Your task is as follows. {task}.")
        say_text(f"Hello {voice_name}. Your task is as follows. {task}.")

    # Assign task (prompt), get response
    agent_response = message_agent(key, prompt)
    agent_response = agents.message_agent(key, prompt)

    return f"Agent {name} created with key {key}. First response: {agent_response}"


def message_agent(key, message):
    """Message an agent with a given key and message"""
    global cfg

    # Check if the key is a valid integer
    if is_valid_int(key):
        agent_response = agents.message_agent(int(key), message)
@@ -287,18 +259,16 @@ def message_agent(key, message):

    # Speak response
    if cfg.speak_mode:
        speak.say_text(agent_response, 1)
        say_text(agent_response, 1)
    return agent_response


def list_agents():
    """List all agents"""
    return agents.list_agents()
    return list_agents()


def delete_agent(key):
    """Delete an agent with a given key"""
    result = agents.delete_agent(key)
    if not result:
        return f"Agent {key} does not exist."
    return f"Agent {key} deleted."
    return f"Agent {key} deleted." if result else f"Agent {key} does not exist."

@@ -34,11 +34,12 @@ def ingest_directory(directory, memory, args):
        print(f"Error while ingesting directory '{directory}': {str(e)}")


def main():
def main() -> None:
    logger = configure_logging()

    parser = argparse.ArgumentParser(
        description="Ingest a file or a directory with multiple files into memory. Make sure to set your .env before running this script."
        description="Ingest a file or a directory with multiple files into memory. "
        "Make sure to set your .env before running this script."
    )
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument("--file", type=str, help="The file to ingest.")

@@ -2,6 +2,7 @@ import os
import subprocess

import docker
from docker.errors import ImageNotFound

WORKSPACE_FOLDER = "auto_gpt_workspace"

@@ -35,7 +36,7 @@ def execute_python_file(file):
        try:
            client.images.get(image_name)
            print(f"Image '{image_name}' found locally")
        except docker.errors.ImageNotFound:
        except ImageNotFound:
            print(
                f"Image '{image_name}' not found locally, pulling from Docker Hub"
            )
@@ -68,7 +69,7 @@ def execute_python_file(file):
            detach=True,
        )

        output = container.wait()
        container.wait()
        logs = container.logs().decode("utf-8")
        container.remove()

@@ -84,7 +85,7 @@ def execute_python_file(file):
def execute_shell(command_line):
    current_dir = os.getcwd()

    if not WORKSPACE_FOLDER in current_dir:  # Change dir into workspace if necessary
    if WORKSPACE_FOLDER not in current_dir:  # Change dir into workspace if necessary
        work_dir = os.path.join(os.getcwd(), WORKSPACE_FOLDER)
        os.chdir(work_dir)

@@ -26,8 +26,10 @@ def split_file(content, max_length=4000, overlap=0):
    between chunks.

    :param text: The input text to be split into chunks
    :param max_length: The maximum length of each chunk, default is 4000 (about 1k token)
    :param overlap: The number of overlapping characters between chunks, default is no overlap
    :param max_length: The maximum length of each chunk,
        default is 4000 (about 1k token)
    :param overlap: The number of overlapping characters between chunks,
        default is no overlap
    :return: A generator yielding chunks of text
    """
    start = 0
@@ -43,7 +45,7 @@ def split_file(content, max_length=4000, overlap=0):
        start += max_length - overlap


def read_file(filename):
def read_file(filename) -> str:
    """Read a file and return the contents"""
    try:
        filepath = safe_join(working_directory, filename)
@@ -51,7 +53,7 @@ def read_file(filename):
            content = f.read()
        return content
    except Exception as e:
        return "Error: " + str(e)
        return f"Error: {str(e)}"


def ingest_file(filename, memory, max_length=4000, overlap=200):

@@ -42,6 +42,10 @@ def generate_image(prompt):
        API_URL = (
            "https://api-inference.huggingface.co/models/CompVis/stable-diffusion-v1-4"
        )
        if cfg.huggingface_api_token is None:
            raise ValueError(
                "You need to set your Hugging Face API token in the config file."
            )
        headers = {"Authorization": "Bearer " + cfg.huggingface_api_token}

        response = requests.post(

@@ -12,7 +12,7 @@ JSON_SCHEMA = """
{
    "command": {
        "name": "command name",
        "args":{
        "args": {
            "arg name": "value"
        }
    },

@@ -1,5 +1,6 @@
import json
import re
from typing import Optional

from autogpt.config import Config

@@ -49,7 +50,7 @@ def add_quotes_to_property_names(json_string: str) -> str:
        raise e


def balance_braces(json_string: str) -> str:
def balance_braces(json_string: str) -> Optional[str]:
    """
    Balance the braces in a JSON string.

@@ -74,7 +75,7 @@ def balance_braces(json_string: str) -> str:
    try:
        json.loads(json_string)
        return json_string
    except json.JSONDecodeError as e:
    except json.JSONDecodeError:
        pass

@@ -1,6 +1,7 @@
import time

import openai
from openai.error import APIError, RateLimitError
from colorama import Fore

from autogpt.config import Config
@@ -18,6 +19,12 @@ def create_chat_completion(
    """Create a chat completion using the OpenAI API"""
    response = None
    num_retries = 5
    if cfg.debug_mode:
        print(
            Fore.GREEN
            + f"Creating chat completion with model {model}, temperature {temperature},"
            f" max_tokens {max_tokens}" + Fore.RESET
        )
    for attempt in range(num_retries):
        try:
            if cfg.use_azure:
@@ -36,14 +43,14 @@ def create_chat_completion(
                    max_tokens=max_tokens,
                )
            break
        except openai.error.RateLimitError:
        except RateLimitError:
            if cfg.debug_mode:
                print(
                    Fore.RED + "Error: ",
                    "API Rate Limit Reached. Waiting 20 seconds..." + Fore.RESET,
                )
            time.sleep(20)
        except openai.error.APIError as e:
        except APIError as e:
            if e.http_status == 502:
                if cfg.debug_mode:
                    print(

@@ -54,7 +54,8 @@ class Logger(metaclass=Singleton):
        error_handler = logging.FileHandler(os.path.join(log_dir, error_file))
        error_handler.setLevel(logging.ERROR)
        error_formatter = AutoGptFormatter(
            "%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s %(message_no_color)s"
            "%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s"
            " %(message_no_color)s"
        )
        error_handler.setFormatter(error_formatter)

@@ -117,7 +118,12 @@ class Logger(metaclass=Singleton):

    def double_check(self, additionalText=None):
        if not additionalText:
            additionalText = "Please ensure you've setup and configured everything correctly. Read https://github.com/Torantulino/Auto-GPT#readme to double check. You can also create a github issue or join the discord and ask there!"
            additionalText = (
                "Please ensure you've setup and configured everything"
                " correctly. Read https://github.com/Torantulino/Auto-GPT#readme to "
                "double check. You can also create a github issue or join the discord"
                " and ask there!"
            )

        self.typewriter_log("DOUBLE CHECK CONFIGURATION", Fore.YELLOW, additionalText)

@@ -150,7 +156,7 @@ class TypingConsoleHandler(logging.StreamHandler):


class ConsoleHandler(logging.StreamHandler):
    def emit(self, record):
    def emit(self, record) -> None:
        msg = self.format(record)
        try:
            print(msg)

@@ -1,7 +1,6 @@
import pinecone
from colorama import Fore, Style

from autogpt.config import Config, Singleton
from autogpt.logger import logger
from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding

@@ -1,9 +1,10 @@
from autogpt.promptgenerator import PromptGenerator


def get_prompt():
def get_prompt() -> str:
    """
    This function generates a prompt string that includes various constraints, commands, resources, and performance evaluations.
    This function generates a prompt string that includes various constraints,
        commands, resources, and performance evaluations.

    Returns:
        str: The generated prompt string.
@@ -14,10 +15,12 @@ def get_prompt():

    # Add constraints to the PromptGenerator object
    prompt_generator.add_constraint(
        "~4000 word limit for short term memory. Your short term memory is short, so immediately save important information to files."
        "~4000 word limit for short term memory. Your short term memory is short, so"
        " immediately save important information to files."
    )
    prompt_generator.add_constraint(
        "If you are unsure how you previously did something or want to recall past events, thinking about similar events will help you remember."
        "If you are unsure how you previously did something or want to recall past"
        " events, thinking about similar events will help you remember."
    )
    prompt_generator.add_constraint("No user assistance")
    prompt_generator.add_constraint(
@@ -87,7 +90,8 @@ def get_prompt():

    # Add performance evaluations to the PromptGenerator object
    prompt_generator.add_performance_evaluation(
        "Continuously review and analyze your actions to ensure you are performing to the best of your abilities."
        "Continuously review and analyze your actions to ensure you are performing to"
        " the best of your abilities."
    )
    prompt_generator.add_performance_evaluation(
        "Constructively self-criticize your big-picture behavior constantly."
@@ -96,10 +100,9 @@ def get_prompt():
        "Reflect on past decisions and strategies to refine your approach."
    )
    prompt_generator.add_performance_evaluation(
        "Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps."
        "Every command has a cost, so be smart and efficient. Aim to complete tasks in"
        " the least number of steps."
    )

    # Generate the prompt string
    prompt_string = prompt_generator.generate_prompt_string()

    return prompt_string
    return prompt_generator.generate_prompt_string()

@@ -3,12 +3,14 @@ import json

class PromptGenerator:
    """
    A class for generating custom prompt strings based on constraints, commands, resources, and performance evaluations.
    A class for generating custom prompt strings based on constraints, commands,
        resources, and performance evaluations.
    """

    def __init__(self):
        """
        Initialize the PromptGenerator object with empty lists of constraints, commands, resources, and performance evaluations.
        Initialize the PromptGenerator object with empty lists of constraints,
            commands, resources, and performance evaluations.
        """
        self.constraints = []
        self.commands = []
@@ -41,7 +43,8 @@ class PromptGenerator:
        Args:
            command_label (str): The label of the command.
            command_name (str): The name of the command.
            args (dict, optional): A dictionary containing argument names and their values. Defaults to None.
            args (dict, optional): A dictionary containing argument names and their
              values. Defaults to None.
        """
        if args is None:
            args = {}
@@ -71,7 +74,7 @@ class PromptGenerator:
        )
        return f'{command["label"]}: "{command["name"]}", args: {args_string}'

    def add_resource(self, resource):
    def add_resource(self, resource: str) -> None:
        """
        Add a resource to the resources list.

@@ -80,7 +83,7 @@ class PromptGenerator:
        """
        self.resources.append(resource)

    def add_performance_evaluation(self, evaluation):
    def add_performance_evaluation(self, evaluation: str) -> None:
        """
        Add a performance evaluation item to the performance_evaluation list.

@@ -89,13 +92,14 @@ class PromptGenerator:
        """
        self.performance_evaluation.append(evaluation)

    def _generate_numbered_list(self, items, item_type="list"):
    def _generate_numbered_list(self, items, item_type="list") -> str:
        """
        Generate a numbered list from given items based on the item_type.

        Args:
            items (list): A list of items to be numbered.
            item_type (str, optional): The type of items in the list. Defaults to 'list'.
            item_type (str, optional): The type of items in the list.
              Defaults to 'list'.

        Returns:
            str: The formatted numbered list.
@@ -108,20 +112,23 @@ class PromptGenerator:
        else:
            return "\n".join(f"{i+1}. {item}" for i, item in enumerate(items))

    def generate_prompt_string(self):
    def generate_prompt_string(self) -> str:
        """
        Generate a prompt string based on the constraints, commands, resources, and performance evaluations.
        Generate a prompt string based on the constraints, commands, resources,
            and performance evaluations.

        Returns:
            str: The generated prompt string.
        """
        formatted_response_format = json.dumps(self.response_format, indent=4)
        prompt_string = (
        return (
            f"Constraints:\n{self._generate_numbered_list(self.constraints)}\n\n"
            f"Commands:\n{self._generate_numbered_list(self.commands, item_type='command')}\n\n"
            "Commands:\n"
            f"{self._generate_numbered_list(self.commands, item_type='command')}\n\n"
            f"Resources:\n{self._generate_numbered_list(self.resources)}\n\n"
            f"Performance Evaluation:\n{self._generate_numbered_list(self.performance_evaluation)}\n\n"
            f"You should only respond in JSON format as described below \nResponse Format: \n{formatted_response_format} \nEnsure the response can be parsed by Python json.loads"
            "Performance Evaluation:\n"
            f"{self._generate_numbered_list(self.performance_evaluation)}\n\n"
            "You should only respond in JSON format as described below \nResponse"
            f" Format: \n{formatted_response_format} \nEnsure the response can be"
            "parsed by Python json.loads"
        )

        return prompt_string

@@ -5,12 +5,13 @@ from playsound import playsound

from autogpt.config import Config

cfg = Config()
import threading
from threading import Lock, Semaphore

import gtts

cfg = Config()

# Default voice IDs
default_voices = ["ErXwobaYiN019PkySvjV", "EXAVITQu4vr4xnSDxMaL"]

@@ -32,6 +32,7 @@ class Spinner:
    def __exit__(self, exc_type, exc_value, exc_traceback):
        """Stop the spinner"""
        self.running = False
        self.spinner_thread.join()
        if self.spinner_thread is not None:
            self.spinner_thread.join()
        sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r")
        sys.stdout.flush()

@@ -57,7 +57,9 @@ def split_text(text, max_length=8192):
def create_message(chunk, question):
    return {
        "role": "user",
        "content": f'"""{chunk}""" Using the above text, please answer the following question: "{question}" -- if the question cannot be answered using the text, please summarize the text.',
        "content": f'"""{chunk}""" Using the above text, please answer the following'
        f' question: "{question}" -- if the question cannot be answered using the text,'
        " please summarize the text.",
    }

@@ -2,6 +2,8 @@ from typing import Dict, List

import tiktoken

from autogpt.logger import logger


def count_message_tokens(
    messages: List[Dict[str, str]], model: str = "gpt-3.5-turbo-0301"
@@ -10,8 +12,10 @@ def count_message_tokens(
    Returns the number of tokens used by a list of messages.

    Args:
        messages (list): A list of messages, each of which is a dictionary containing the role and content of the message.
        model (str): The name of the model to use for tokenization. Defaults to "gpt-3.5-turbo-0301".
        messages (list): A list of messages, each of which is a dictionary
            containing the role and content of the message.
        model (str): The name of the model to use for tokenization.
            Defaults to "gpt-3.5-turbo-0301".

    Returns:
        int: The number of tokens used by the list of messages.
@@ -22,7 +26,8 @@ def count_message_tokens(
        logger.warn("Warning: model not found. Using cl100k_base encoding.")
        encoding = tiktoken.get_encoding("cl100k_base")
    if model == "gpt-3.5-turbo":
        # !Node: gpt-3.5-turbo may change over time. Returning num tokens assuming gpt-3.5-turbo-0301.")
        # !Node: gpt-3.5-turbo may change over time.
        # Returning num tokens assuming gpt-3.5-turbo-0301.")
        return count_message_tokens(messages, model="gpt-3.5-turbo-0301")
    elif model == "gpt-4":
        # !Note: gpt-4 may change over time. Returning num tokens assuming gpt-4-0314.")
@@ -37,7 +42,9 @@ def count_message_tokens(
        tokens_per_name = 1
    else:
        raise NotImplementedError(
            f"""num_tokens_from_messages() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens."""
            f"num_tokens_from_messages() is not implemented for model {model}.\n"
            " See https://github.com/openai/openai-python/blob/main/chatml.md for"
            " information on how messages are converted to tokens."
        )
    num_tokens = 0
    for message in messages:
@@ -62,5 +69,4 @@ def count_string_tokens(string: str, model_name: str) -> int:
        int: The number of tokens in the text string.
    """
    encoding = tiktoken.encoding_for_model(model_name)
    num_tokens = len(encoding.encode(string))
    return num_tokens
    return len(encoding.encode(string))

|
@ -13,8 +13,8 @@ def clean_input(prompt: str = ""):
|
|||
|
||||
def validate_yaml_file(file: str):
|
||||
try:
|
||||
with open(file) as file:
|
||||
yaml.load(file, Loader=yaml.FullLoader)
|
||||
with open(file, encoding="utf-8") as fp:
|
||||
yaml.load(fp.read(), Loader=yaml.FullLoader)
|
||||
except FileNotFoundError:
|
||||
return (False, f"The file {Fore.CYAN}`{file}`{Fore.RESET} wasn't found")
|
||||
except yaml.YAMLError as e:
|
||||
|
|
|
@@ -1,17 +1,11 @@
from duckduckgo_search import ddg
from selenium import webdriver
import autogpt.summary as summary
from bs4 import BeautifulSoup
import json
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.chrome.service import Service as ChromeService
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
import os
import logging
from pathlib import Path
from autogpt.config import Config