Code review changes

pull/33/head
Andres Caicedo 2023-04-09 15:39:11 +02:00
parent 54cbf1cae1
commit 011699e6a1
18 changed files with 28 additions and 98 deletions

.gitignore

@@ -8,4 +8,4 @@ scripts/auto_gpt_workspace/*
*.mpeg
.env
last_run_ai_settings.yaml
-outputs/*
+outputs/*

Dockerfile

@@ -2,7 +2,7 @@ FROM python:3.11
WORKDIR /app
COPY scripts/ /app
COPY requirements.txt /app/requirements.txt
RUN pip install -r requirements.txt
CMD ["python", "main.py"]

scripts/ai_config.py

@@ -13,7 +13,6 @@ class AIConfig:
# Soon this will go in a folder where it remembers more stuff about the run(s)
SAVE_FILE = "last_run_ai_settings.yaml"
@classmethod
def load(cls, config_file=SAVE_FILE):
"""Load variables from yaml file if it exists, otherwise use defaults."""
@@ -29,7 +28,6 @@ class AIConfig:
return cls(ai_name, ai_role, ai_goals)
def save(self, config_file=SAVE_FILE):
"""Save variables to yaml file."""
config = {"ai_name": self.ai_name, "ai_role": self.ai_role, "ai_goals": self.ai_goals}

scripts/ai_functions.py

@@ -1,5 +1,5 @@
from typing import List, Optional
-from json import dumps
+import json
from config import Config
from call_ai_function import call_ai_function
from json_parser import fix_and_parse_json
@@ -23,7 +23,7 @@ def improve_code(suggestions: List[str], code: str) -> str:
function_string = (
"def generate_improved_code(suggestions: List[str], code: str) -> str:"
)
-args = [dumps(suggestions), code]
+args = [json.dumps(suggestions), code]
description_string = """Improves the provided code based on the suggestions provided, making no other changes."""
result_string = call_ai_function(function_string, args, description_string)
@@ -36,7 +36,7 @@ def write_tests(code: str, focus: List[str]) -> str:
function_string = (
"def create_test_cases(code: str, focus: Optional[str] = None) -> str:"
)
-args = [code, dumps(focus)]
+args = [code, json.dumps(focus)]
description_string = """Generates test cases for the existing code, focusing on specific areas if required."""
result_string = call_ai_function(function_string, args, description_string)
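
Reviewer note: the edits in this file follow the convention applied throughout this changeset: import the module and qualify calls, rather than pulling individual names into the local namespace. A minimal standalone sketch of the two styles, not part of the diff:

import json

suggestions = ["add type hints", "handle empty input"]

# Qualified call: the origin of dumps() is visible at the call site,
# and the name json cannot silently shadow a local binding.
args = [json.dumps(suggestions)]

# The replaced style, for comparison:
# from json import dumps
# args = [dumps(suggestions)]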

scripts/browse.py

@@ -1,14 +1,13 @@
-from requests import get
+import requests
from bs4 import BeautifulSoup
from config import Config
from llm_utils import create_chat_completion
cfg = Config()
def scrape_text(url):
"""Scrape text from a webpage"""
-response = get(url)
+response = requests.get(url)
# Check if the response contains an HTTP error
if response.status_code >= 400:
@@ -30,26 +29,22 @@ def scrape_text(url):
def extract_hyperlinks(soup):
"""Extract hyperlinks from a BeautifulSoup object"""
hyperlinks = []
for link in soup.find_all('a', href=True):
hyperlinks.append((link.text, link['href']))
return hyperlinks
def format_hyperlinks(hyperlinks):
"""Format hyperlinks into a list of strings"""
formatted_links = []
for link_text, link_url in hyperlinks:
formatted_links.append(f"{link_text} ({link_url})")
return formatted_links
def scrape_links(url):
"""Scrape hyperlinks from a webpage"""
-response = get(url)
+response = requests.get(url)
# Check if the response contains an HTTP error
if response.status_code >= 400:
@@ -72,7 +67,6 @@ def split_text(text, max_length=8192):
current_chunk = []
for paragraph in paragraphs:
if current_length + len(paragraph) + 1 <= max_length:
current_chunk.append(paragraph)
current_length += len(paragraph) + 1
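
Reviewer note: the split_text hunk above shows only part of the chunking loop. A minimal standalone sketch of the paragraph-packing approach it implements; the variable names match the diff, but the flush-and-yield details are assumptions rather than the file's exact code:

def split_text(text, max_length=8192):
    """Greedily pack newline-separated paragraphs into chunks of at most max_length characters."""
    paragraphs = text.split("\n")
    current_chunk = []
    current_length = 0
    for paragraph in paragraphs:
        # The +1 accounts for the newline restored when the chunk is joined.
        if current_length + len(paragraph) + 1 <= max_length:
            current_chunk.append(paragraph)
            current_length += len(paragraph) + 1
        else:
            yield "\n".join(current_chunk)
            current_chunk = [paragraph]
            current_length = len(paragraph) + 1
    if current_chunk:
        yield "\n".join(current_chunk)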

scripts/call_ai_function.py

@@ -1,8 +1,8 @@
from config import Config
+from llm_utils import create_chat_completion
cfg = Config()
-from llm_utils import create_chat_completion
# This is a magic function that can do anything with no-code. See
# https://github.com/Torantulino/AI-Functions for more info.
def call_ai_function(function, args, description, model=cfg.smart_llm_model):

scripts/chat.py

@@ -1,11 +1,11 @@
-from time import sleep
+import time
import openai
from dotenv import load_dotenv
from config import Config
import token_counter
from llm_utils import create_chat_completion
-cfg = Config()
+cfg = Config()
def create_chat_message(role, content):
"""
@@ -48,10 +48,8 @@ def chat_with_ai(
"""
model = cfg.fast_llm_model # TODO: Change model from hardcode to argument
# Reserve 1000 tokens for the response
if debug:
print(f"Token limit: {token_limit}")
print(f"Token limit: {token_limit}")
send_token_limit = token_limit - 1000
current_context = [
@@ -73,7 +71,6 @@ def chat_with_ai(
message_to_add = full_message_history[next_message_to_add_index]
tokens_to_add = token_counter.count_message_tokens([message_to_add], model)
if current_tokens_used + tokens_to_add > send_token_limit:
break
@@ -99,16 +96,13 @@ def chat_with_ai(
print(f"Send Token Count: {current_tokens_used}")
print(f"Tokens remaining for response: {tokens_remaining}")
print("------------ CONTEXT SENT TO AI ---------------")
for message in current_context:
# Skip printing the prompt
if message["role"] == "system" and message["content"] == prompt:
continue
print(
f"{message['role'].capitalize()}: {message['content']}")
print()
print("----------- END OF CONTEXT ----------------")
# TODO: use a model defined elsewhere, so that model can contain temperature and other settings we care about
@@ -130,4 +124,4 @@ def chat_with_ai(
except openai.error.RateLimitError:
# TODO: When we switch to langchain, this is built in
print("Error: ", "API Rate Limit Reached. Waiting 10 seconds...")
-sleep(10)
+time.sleep(10)
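
Reviewer note: the tail of chat_with_ai retries after a fixed 10-second pause whenever OpenAI rate-limits the request. A standalone sketch of that pattern; the helper name and the bounded loop are illustrative, not the file's actual structure:

import time

import openai


def call_with_rate_limit_retry(create_completion, max_attempts=5):
    """Invoke create_completion(), sleeping a fixed 10 seconds on rate-limit errors."""
    for _ in range(max_attempts):
        try:
            return create_completion()
        except openai.error.RateLimitError:
            print("Error: ", "API Rate Limit Reached. Waiting 10 seconds...")
            time.sleep(10)
    raise RuntimeError("rate limited on every attempt")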

scripts/commands.py

@@ -1,7 +1,7 @@
import browse
import json
import memory as mem
-from datetime import datetime
+import datetime
import agent_manager as agents
import speak
from config import Config
@@ -110,7 +110,7 @@ def execute_command(command_name, arguments):
def get_datetime():
"""Return the current date and time"""
return "Current date and time: " + \
-datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def google_search(query, num_results=8):
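
Reviewer note: this is the one hunk where the module-import convention changes a call site's meaning rather than just its spelling. With "from datetime import datetime" the bare name is the class; with "import datetime" it is the module, so the class must be qualified as datetime.datetime. A standalone illustration:

import datetime

# datetime now names the module; the class lives at datetime.datetime.
now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print("Current date and time: " + now)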

scripts/config.py

@@ -44,52 +44,42 @@ class Config(metaclass=Singleton):
# Initialize the OpenAI API client
openai.api_key = self.openai_api_key
def set_continuous_mode(self, value: bool):
"""Set the continuous mode value."""
self.continuous_mode = value
def set_speak_mode(self, value: bool):
"""Set the speak mode value."""
self.speak_mode = value
def set_fast_llm_model(self, value: str):
"""Set the fast LLM model value."""
self.fast_llm_model = value
def set_smart_llm_model(self, value: str):
"""Set the smart LLM model value."""
self.smart_llm_model = value
def set_fast_token_limit(self, value: int):
"""Set the fast token limit value."""
self.fast_token_limit = value
def set_smart_token_limit(self, value: int):
"""Set the smart token limit value."""
self.smart_token_limit = value
def set_openai_api_key(self, value: str):
"""Set the OpenAI API key value."""
self.openai_api_key = value
def set_elevenlabs_api_key(self, value: str):
"""Set the ElevenLabs API key value."""
self.elevenlabs_api_key = value
def set_google_api_key(self, value: str):
"""Set the Google API key value."""
self.google_api_key = value
def set_custom_search_engine_id(self, value: str):
"""Set the custom search engine ID value."""
-self.custom_search_engine_id = value
+self.custom_search_engine_id = value
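
Reviewer note: Config is declared with a Singleton metaclass, so every Config() call returns the same instance and the setters above act as process-wide configuration writes. A standalone sketch of that behavior; the metaclass here is an assumed minimal version, not the repo's:

class Singleton(type):
    """Metaclass that caches one instance per class."""
    _instances = {}

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            cls._instances[cls] = super().__call__(*args, **kwargs)
        return cls._instances[cls]


class Config(metaclass=Singleton):
    def __init__(self):
        self.speak_mode = False

    def set_speak_mode(self, value: bool):
        """Set the speak mode value."""
        self.speak_mode = value


a = Config()
b = Config()
a.set_speak_mode(True)
assert a is b and b.speak_mode  # one shared instance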

scripts/data.py

@@ -1,4 +1,4 @@
-from os import path
+import os
from pathlib import Path
SRC_DIR = Path(__file__).parent
@@ -6,7 +6,7 @@ def load_prompt():
"""Load the prompt from data/prompt.txt"""
try:
# get directory of this file:
-file_dir = Path(path.dirname(path.realpath(__file__)))
+file_dir = Path(os.path.dirname(os.path.realpath(__file__)))
data_dir = file_dir / "data"
prompt_file = data_dir / "prompt.txt"
# Load the prompt from data/prompt.txt

scripts/execute_code.py

@@ -1,5 +1,5 @@
import docker
-from os import path
+import os
def execute_python_file(file):
@@ -11,9 +11,9 @@ def execute_python_file(file):
if not file.endswith(".py"):
return "Error: Invalid file type. Only .py files are allowed."
-file_path = path.join(workspace_folder, file)
+file_path = os.path.join(workspace_folder, file)
-if not path.isfile(file_path):
+if not os.path.isfile(file_path):
return f"Error: File '{file}' does not exist."
try:

scripts/file_operations.py

@@ -36,10 +36,8 @@ def write_to_file(filename, text):
try:
filepath = safe_join(working_directory, filename)
directory = os.path.dirname(filepath)
if not os.path.exists(directory):
os.makedirs(directory)
with open(filepath, "w") as f:
f.write(text)
return "File written to successfully."

scripts/json_parser.py

@@ -1,7 +1,6 @@
import json
from call_ai_function import call_ai_function
from config import Config
cfg = Config()
def fix_and_parse_json(json_str: str, try_to_fix_with_gpt: bool = True):
@@ -38,18 +37,15 @@ def fix_and_parse_json(json_str: str, try_to_fix_with_gpt: bool = True):
json_str = json_str[:last_brace_index+1]
return json.loads(json_str)
except Exception as e:
if try_to_fix_with_gpt:
print(f"Warning: Failed to parse AI output, attempting to fix.\n If you see this warning frequently, it's likely that your prompt is confusing the AI. Try changing it up slightly.")
# Now try to fix this up using the ai_functions
ai_fixed_json = fix_json(json_str, json_schema, False)
if ai_fixed_json != "failed":
return json.loads(ai_fixed_json)
else:
print(f"Failed to fix ai output, telling the AI.") # This allows the AI to react to the error message, which usually results in it correcting its ways.
return json_str
else:
raise e
@@ -63,11 +59,9 @@ def fix_json(json_str: str, schema: str, debug=False) -> str:
# If it doesn't already start with a "`", add one:
if not json_str.startswith("`"):
json_str = "```json\n" + json_str + "\n```"
result_string = call_ai_function(
function_string, args, description_string, model=cfg.fast_llm_model
)
if debug:
print("------------ JSON FIX ATTEMPT ---------------")
print(f"Original JSON: {json_str}")

scripts/llm_utils.py

@@ -1,6 +1,5 @@
import openai
from config import Config
cfg = Config()
openai.api_key = cfg.openai_api_key

scripts/main.py

@@ -27,24 +27,17 @@ def print_to_console(
max_typing_speed=0.01):
"""Prints text to the console with a typing effect"""
global cfg
if speak_text and cfg.speak_mode:
speak.say_text(f"{title}. {content}")
print(title_color + title + " " + Style.RESET_ALL, end="")
if content:
if isinstance(content, list):
content = " ".join(content)
words = content.split()
for i, word in enumerate(words):
print(word, end="", flush=True)
if i < len(words) - 1:
print(" ", end="", flush=True)
typing_speed = random.uniform(min_typing_speed, max_typing_speed)
time.sleep(typing_speed)
# type faster after each word
@@ -88,7 +81,6 @@ def print_assistant_thoughts(assistant_reply):
if assistant_thoughts_plan:
print_to_console("PLAN:", Fore.YELLOW, "")
# If it's a list, join it into a string
if isinstance(assistant_thoughts_plan, list):
assistant_thoughts_plan = "\n".join(assistant_thoughts_plan)
elif isinstance(assistant_thoughts_plan, dict):
@@ -96,7 +88,6 @@ def print_assistant_thoughts(assistant_reply):
# Split the input_string using the newline character and dashes
lines = assistant_thoughts_plan.split('\n')
for line in lines:
line = line.lstrip("- ")
print_to_console("- ", Fore.GREEN, line.strip())
@@ -131,13 +122,11 @@ def load_variables(config_file="config.yaml"):
# Prompt the user for input if config file is missing or empty values
if not ai_name:
ai_name = input("Name your AI: ")
if ai_name == "":
ai_name = "Entrepreneur-GPT"
if not ai_role:
ai_role = input(f"{ai_name} is: ")
if ai_role == "":
ai_role = "an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth."
@@ -146,20 +135,16 @@ def load_variables(config_file="config.yaml"):
print("For example: \nIncrease net worth, Grow Twitter Account, Develop and manage multiple businesses autonomously'")
print("Enter nothing to load defaults, enter nothing when finished.")
ai_goals = []
for i in range(5):
ai_goal = input(f"Goal {i+1}: ")
if ai_goal == "":
break
ai_goals.append(ai_goal)
if len(ai_goals) == 0:
ai_goals = ["Increase net worth", "Grow Twitter Account", "Develop and manage multiple businesses autonomously"]
# Save variables to yaml file
config = {"ai_name": ai_name, "ai_role": ai_role, "ai_goals": ai_goals}
with open(config_file, "w") as file:
documents = yaml.dump(config, file)
@@ -168,7 +153,6 @@ def load_variables(config_file="config.yaml"):
# Construct full prompt
full_prompt = f"You are {ai_name}, {ai_role}\n{prompt_start}\n\nGOALS:\n\n"
for i, goal in enumerate(ai_goals):
full_prompt += f"{i+1}. {goal}\n"
@@ -179,7 +163,6 @@ def load_variables(config_file="config.yaml"):
def construct_prompt():
"""Construct the prompt for the AI to respond to"""
config = AIConfig.load()
if config.ai_name:
print_to_console(
f"Welcome back! ",
@@ -190,8 +173,7 @@ def construct_prompt():
Name: {config.ai_name}
Role: {config.ai_role}
Goals: {config.ai_goals}
-Continue (y/n): """)
+Continue (y/n): """)
if should_continue.lower() == "n":
config = AIConfig()
@@ -221,10 +203,8 @@ def prompt_user():
print_to_console(
"Name your AI: ",
Fore.GREEN,
"For example, 'Entrepreneur-GPT'")
"For example, 'Entrepreneur-GPT'")
ai_name = input("AI Name: ")
if ai_name == "":
ai_name = "Entrepreneur-GPT"
@@ -240,7 +220,6 @@ def prompt_user():
Fore.GREEN,
"For example, 'an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth.'")
ai_role = input(f"{ai_name} is: ")
if ai_role == "":
ai_role = "an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth."
@@ -248,19 +227,14 @@ def prompt_user():
print_to_console(
"Enter up to 5 goals for your AI: ",
Fore.GREEN,
"For example: \nIncrease net worth, Grow Twitter Account, Develop and manage multiple businesses autonomously'")
"For example: \nIncrease net worth, Grow Twitter Account, Develop and manage multiple businesses autonomously'")
print("Enter nothing to load defaults, enter nothing when finished.", flush=True)
ai_goals = []
for i in range(5):
ai_goal = input(f"{Fore.LIGHTBLUE_EX}Goal{Style.RESET_ALL} {i+1}: ")
if ai_goal == "":
break
ai_goals.append(ai_goal)
if len(ai_goals) == 0:
ai_goals = ["Increase net worth", "Grow Twitter Account",
"Develop and manage multiple businesses autonomously"]
@@ -268,7 +242,6 @@ def prompt_user():
config = AIConfig(ai_name, ai_role, ai_goals)
return config
def parse_arguments():
"""Parses the arguments passed to the script"""
global cfg
@@ -346,8 +319,7 @@ while True:
f"Enter 'y' to authorise command or 'n' to exit program, or enter feedback for {ai_name}...",
flush=True)
while True:
-console_input = input(Fore.MAGENTA + "Input:" + Style.RESET_ALL)
+console_input = input(Fore.MAGENTA + "Input:" + Style.RESET_ALL)
if console_input.lower() == "y":
user_input = "GENERATE NEXT COMMAND JSON"
break

scripts/speak.py

@@ -2,7 +2,6 @@ import os
from playsound import playsound
import requests
from config import Config
cfg = Config()
import gtts
@@ -20,7 +19,8 @@ def eleven_labs_speech(text, voice_index=0):
tts_url = "https://api.elevenlabs.io/v1/text-to-speech/{voice_id}".format(
voice_id=voices[voice_index])
formatted_message = {"text": text}
-response = requests.post(tts_url, headers=tts_headers, json=formatted_message)
+response = requests.post(
+tts_url, headers=tts_headers, json=formatted_message)
if response.status_code == 200:
with open("speech.mpeg", "wb") as f:

scripts/spinner.py

@@ -14,7 +14,6 @@ class Spinner:
self.running = False
self.spinner_thread = None
def spin(self):
"""Spin the spinner"""
while self.running:
@@ -23,14 +22,12 @@ class Spinner:
time.sleep(self.delay)
sys.stdout.write('\b' * (len(self.message) + 2))
def __enter__(self):
"""Start the spinner"""
self.running = True
self.spinner_thread = threading.Thread(target=self.spin)
self.spinner_thread.start()
def __exit__(self, exc_type, exc_value, exc_traceback):
"""Stop the spinner"""
self.running = False
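
Reviewer note: Spinner implements __enter__ and __exit__, so it is meant to wrap slow operations in a with-block that animates until the block exits. A usage sketch; the constructor arguments are inferred from the attributes visible in the diff (message, delay), so treat them as assumptions:

import time

from spinner import Spinner  # assumes the file is importable as the module "spinner"

with Spinner("Thinking... "):
    time.sleep(3)  # stand-in for a slow API call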

scripts/token_counter.py

@@ -1,7 +1,6 @@
import tiktoken
from typing import List, Dict
def count_message_tokens(messages : List[Dict[str, str]], model : str = "gpt-3.5-turbo-0301") -> int:
"""
Returns the number of tokens used by a list of messages.
@@ -18,7 +17,6 @@ def count_message_tokens(messages : List[Dict[str, str]], model : str = "gpt-3.5
except KeyError:
print("Warning: model not found. Using cl100k_base encoding.")
encoding = tiktoken.get_encoding("cl100k_base")
if model == "gpt-3.5-turbo":
# !Note: gpt-3.5-turbo may change over time. Returning num tokens assuming gpt-3.5-turbo-0301.
return count_message_tokens(messages, model="gpt-3.5-turbo-0301")
@@ -34,19 +32,15 @@ def count_message_tokens(messages : List[Dict[str, str]], model : str = "gpt-3.5
else:
raise NotImplementedError(f"""num_tokens_from_messages() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens.""")
num_tokens = 0
for message in messages:
num_tokens += tokens_per_message
for key, value in message.items():
num_tokens += len(encoding.encode(value))
if key == "name":
num_tokens += tokens_per_name
num_tokens += 3 # every reply is primed with <|start|>assistant<|message|>
return num_tokens
def count_string_tokens(string: str, model_name: str) -> int:
"""
Returns the number of tokens in a text string.