Merge branch 'master' into fix-user-feedback-json-error

commit 1f9d66b745

.gitignore
@@ -10,4 +10,5 @@ auto_gpt_workspace/*
 venv/*
 outputs/*
 ai_settings.yaml
 .vscode
+auto-gpt.json

Dockerfile
@@ -2,6 +2,7 @@ FROM python:3.11
 
 WORKDIR /app
 COPY scripts/ /app
 COPY requirements.txt /app
+
 RUN pip install -r requirements.txt
 

README.md
@@ -81,7 +81,7 @@ git clone https://github.com/Torantulino/Auto-GPT.git
 2. Navigate to the project directory:
 *(Type this into your CMD window, you're aiming to navigate the CMD window to the repository you just downloaded)*
 ```
-$ cd 'Auto-GPT'
+cd 'Auto-GPT'
 ```
 
 3. Install the required dependencies:
@@ -92,7 +92,7 @@ pip install -r requirements.txt
 
 4. Rename `.env.template` to `.env` and fill in your `OPENAI_API_KEY`. If you plan to use Speech Mode, fill in your `ELEVEN_LABS_API_KEY` as well.
 - Obtain your OpenAI API key from: https://platform.openai.com/account/api-keys.
-- Obtain your ElevenLabs API key from: https://elevenlabs.io. You can view your xi-api-key using the "Profile" tab on the website.
+- Obtain your ElevenLabs API key from: https://beta.elevenlabs.io. You can view your xi-api-key using the "Profile" tab on the website.
 - If you want to use GPT on an Azure instance, set `USE_AZURE` to `True` and provide the `OPENAI_API_BASE`, `OPENAI_API_VERSION` and `OPENAI_DEPLOYMENT_ID` values as explained here: https://pypi.org/project/openai/ in the `Microsoft Azure Endpoints` section
 
 ## 🔧 Usage
@@ -114,7 +114,7 @@ python scripts/main.py --speak
 
 ## 🔍 Google API Keys Configuration
 
-This section is optional, use the official google api if you are having issues with error 429 when running google search.
+This section is optional, use the official google api if you are having issues with error 429 when running a google search.
 To use the `google_official_search` command, you need to set up your Google API keys in your environment variables.
 
 1. Go to the [Google Cloud Console](https://console.cloud.google.com/).
@@ -127,6 +127,8 @@ To use the `google_official_search` command, you need to set up your Google API
 8. Set up your search engine by following the prompts. You can choose to search the entire web or specific sites.
 9. Once you've created your search engine, click on "Control Panel" and then "Basics". Copy the "Search engine ID" and set it as an environment variable named `CUSTOM_SEARCH_ENGINE_ID` on your machine. See setting up environment variables below.
 
+*Remember that your free daily custom search quota allows only up to 100 searches. To increase this limit, you need to assign a billing account to the project to profit from up to 10K daily searches.*
+
 ### Setting up environment variables
 For Windows Users:
 ```
@@ -185,10 +187,15 @@ are loaded for the agent at any given time.
 3. Find your API key and region under the default project in the left sidebar.
 
 ### Setting up environment variables
-For Windows Users:
+
+Simply set them in the `.env` file.
+
+Alternatively, you can set them from the command line (advanced):
+
+For Windows Users:
 ```
 setx PINECONE_API_KEY "YOUR_PINECONE_API_KEY"
-export PINECONE_ENV="Your pinecone region" # something like: us-east4-gcp
+setx PINECONE_ENV "Your pinecone region" # something like: us-east4-gcp
+
 ```
 For macOS and Linux users:
 
@@ -198,7 +205,6 @@ export PINECONE_ENV="Your pinecone region" # something like: us-east4-gcp
 
 ```
 
-Or you can set them in the `.env` file.
 
 ## View Memory Usage
 
@@ -222,6 +228,7 @@ If you don't have access to the GPT4 api, this mode will allow you to use Auto-G
 ```
 python scripts/main.py --gpt3only
 ```
+It is recommended to use a virtual machine for tasks that require high security measures to prevent any potential harm to the main computer's system and data.
 
 ## 🖼 Image Generation
 By default, Auto-GPT uses DALL-e for image generation. To use Stable Diffusion, a [HuggingFace API Token](https://huggingface.co/settings/tokens) is required.

scripts/agent_manager.py
@@ -7,6 +7,7 @@ agents = {}  # key, (task, full_message_history, model)
 # TODO: Centralise use of create_chat_completion() to globally enforce token limit
 
 def create_agent(task, prompt, model):
+    """Create a new agent and return its key"""
     global next_key
     global agents
 
@@ -32,6 +33,7 @@ def create_agent(task, prompt, model):
 
 
 def message_agent(key, message):
+    """Send a message to an agent and return its response"""
     global agents
 
     task, messages, model = agents[int(key)]
@@ -52,6 +54,7 @@ def message_agent(key, message):
 
 
 def list_agents():
+    """Return a list of all agents"""
     global agents
 
     # Return a list of agent keys and their tasks
@@ -59,6 +62,7 @@ def list_agents():
 
 
 def delete_agent(key):
+    """Delete an agent and return True if successful, False otherwise"""
     global agents
 
     try:

scripts/ai_config.py
@@ -3,7 +3,27 @@ import data
 import os
 
 class AIConfig:
-    def __init__(self, ai_name="", ai_role="", ai_goals=[]):
+    """
+    A class object that contains the configuration information for the AI
+
+    Attributes:
+        ai_name (str): The name of the AI.
+        ai_role (str): The description of the AI's role.
+        ai_goals (list): The list of objectives the AI is supposed to complete.
+    """
+
+    def __init__(self, ai_name: str="", ai_role: str="", ai_goals: list=[]) -> None:
+        """
+        Initialize a class instance
+
+        Parameters:
+            ai_name (str): The name of the AI.
+            ai_role (str): The description of the AI's role.
+            ai_goals (list): The list of objectives the AI is supposed to complete.
+        Returns:
+            None
+        """
+
         self.ai_name = ai_name
         self.ai_role = ai_role
         self.ai_goals = ai_goals
@@ -12,8 +32,19 @@ class AIConfig:
     SAVE_FILE = os.path.join(os.path.dirname(__file__), '..', 'ai_settings.yaml')
 
     @classmethod
-    def load(cls, config_file=SAVE_FILE):
-        # Load variables from yaml file if it exists
+    def load(cls: object, config_file: str=SAVE_FILE) -> object:
+        """
+        Returns class object with parameters (ai_name, ai_role, ai_goals) loaded from yaml file if yaml file exists,
+        else returns class with no parameters.
+
+        Parameters:
+            cls (class object): An AIConfig Class object.
+            config_file (int): The path to the config yaml file. DEFAULT: "../ai_settings.yaml"
+
+        Returns:
+            cls (object): A instance of given cls object
+        """
+
         try:
             with open(config_file) as file:
                 config_params = yaml.load(file, Loader=yaml.FullLoader)
@@ -26,12 +57,32 @@ class AIConfig:
 
         return cls(ai_name, ai_role, ai_goals)
 
-    def save(self, config_file=SAVE_FILE):
+    def save(self, config_file: str=SAVE_FILE) -> None:
+        """
+        Saves the class parameters to the specified file yaml file path as a yaml file.
+
+        Parameters:
+            config_file(str): The path to the config yaml file. DEFAULT: "../ai_settings.yaml"
+
+        Returns:
+            None
+        """
+
         config = {"ai_name": self.ai_name, "ai_role": self.ai_role, "ai_goals": self.ai_goals}
         with open(config_file, "w") as file:
             yaml.dump(config, file)
 
-    def construct_full_prompt(self):
+    def construct_full_prompt(self) -> str:
+        """
+        Returns a prompt to the user with the class information in an organized fashion.
+
+        Parameters:
+            None
+
+        Returns:
+            full_prompt (str): A string containing the intitial prompt for the user including the ai_name, ai_role and ai_goals.
+        """
+
         prompt_start = """Your decisions must always be made independently without seeking user assistance. Play to your strengths as an LLM and pursue simple strategies with no legal complications."""
 
         # Construct full prompt
@@ -41,3 +92,4 @@ class AIConfig:
 
         full_prompt += f"\n\n{data.load_prompt()}"
         return full_prompt
+

scripts/ai_functions.py
@@ -5,9 +5,17 @@ from call_ai_function import call_ai_function
 from json_parser import fix_and_parse_json
 cfg = Config()
 
 # Evaluating code
 
 def evaluate_code(code: str) -> List[str]:
+    """
+    A function that takes in a string and returns a response from create chat completion api call.
+
+    Parameters:
+        code (str): Code to be evaluated.
+    Returns:
+        A result string from create chat completion. A list of suggestions to improve the code.
+    """
 
     function_string = "def analyze_code(code: str) -> List[str]:"
     args = [code]
     description_string = """Analyzes the given code and returns a list of suggestions for improvements."""
@@ -17,9 +25,17 @@ def evaluate_code(code: str) -> List[str]:
     return result_string
 
 
 # Improving code
 
 def improve_code(suggestions: List[str], code: str) -> str:
+    """
+    A function that takes in code and suggestions and returns a response from create chat completion api call.
+
+    Parameters:
+        suggestions (List): A list of suggestions around what needs to be improved.
+        code (str): Code to be improved.
+    Returns:
+        A result string from create chat completion. Improved code in response.
+    """
 
     function_string = (
         "def generate_improved_code(suggestions: List[str], code: str) -> str:"
     )
@@ -30,10 +46,18 @@ def improve_code(suggestions: List[str], code: str) -> str:
     return result_string
 
 
 # Writing tests
 
 
 def write_tests(code: str, focus: List[str]) -> str:
+    """
+    A function that takes in code and focus topics and returns a response from create chat completion api call.
+
+    Parameters:
+        focus (List): A list of suggestions around what needs to be improved.
+        code (str): Code for test cases to be generated against.
+    Returns:
+        A result string from create chat completion. Test cases for the submitted code in response.
+    """
 
     function_string = (
         "def create_test_cases(code: str, focus: Optional[str] = None) -> str:"
     )
@@ -42,5 +66,3 @@ def write_tests(code: str, focus: List[str]) -> str:
 
     result_string = call_ai_function(function_string, args, description_string)
     return result_string
-
-

scripts/browse.py
@@ -6,6 +6,7 @@ from llm_utils import create_chat_completion
 cfg = Config()
 
 def scrape_text(url):
+    """Scrape text from a webpage"""
     # Most basic check if the URL is valid:
     if not url.startswith('http'):
         return "Error: Invalid URL"
@@ -33,6 +34,7 @@ def scrape_text(url):
 
 
 def extract_hyperlinks(soup):
+    """Extract hyperlinks from a BeautifulSoup object"""
     hyperlinks = []
     for link in soup.find_all('a', href=True):
         hyperlinks.append((link.text, link['href']))
@@ -40,6 +42,7 @@ def extract_hyperlinks(soup):
 
 
 def format_hyperlinks(hyperlinks):
+    """Format hyperlinks into a list of strings"""
     formatted_links = []
     for link_text, link_url in hyperlinks:
         formatted_links.append(f"{link_text} ({link_url})")
@@ -47,6 +50,7 @@ def format_hyperlinks(hyperlinks):
 
 
 def scrape_links(url):
+    """Scrape links from a webpage"""
     response = requests.get(url, headers=cfg.user_agent_header)
 
     # Check if the response contains an HTTP error
@@ -64,6 +68,7 @@ def scrape_links(url):
 
 
 def split_text(text, max_length=8192):
+    """Split text into chunks of a maximum length"""
     paragraphs = text.split("\n")
     current_length = 0
     current_chunk = []
@@ -82,12 +87,14 @@ def split_text(text, max_length=8192):
 
 
 def create_message(chunk, question):
+    """Create a message for the user to summarize a chunk of text"""
     return {
         "role": "user",
         "content": f"\"\"\"{chunk}\"\"\" Using the above text, please answer the following question: \"{question}\" -- if the question cannot be answered using the text, please summarize the text."
     }
 
 def summarize_text(text, question):
+    """Summarize text using the LLM model"""
     if not text:
         return "Error: No text to summarize"

scripts/call_ai_function.py
@@ -1,11 +1,12 @@
 from config import Config
 
 cfg = Config()
 
 from llm_utils import create_chat_completion
 
 # This is a magic function that can do anything with no-code. See
 # https://github.com/Torantulino/AI-Functions for more info.
 def call_ai_function(function, args, description, model=None):
+    """Call an AI function"""
     if model is None:
         model = cfg.smart_llm_model
     # For each arg, if any are None, convert to "None":

scripts/chat.py
@@ -3,11 +3,9 @@ import openai
 from dotenv import load_dotenv
 from config import Config
 import token_counter
 
-cfg = Config()
-
 from llm_utils import create_chat_completion
 
+cfg = Config()
 
 def create_chat_message(role, content):
     """
@@ -46,8 +44,8 @@ def chat_with_ai(
         user_input,
         full_message_history,
         permanent_memory,
-        token_limit,
-        debug=False):
+        token_limit):
     """Interact with the OpenAI API, sending the prompt, user input, message history, and permanent memory."""
     while True:
         try:
             """
@@ -65,13 +63,15 @@ def chat_with_ai(
             """
             model = cfg.fast_llm_model  # TODO: Change model from hardcode to argument
             # Reserve 1000 tokens for the response
-            if debug:
+
+            if cfg.debug:
                 print(f"Token limit: {token_limit}")
+
             send_token_limit = token_limit - 1000
 
             relevant_memory = permanent_memory.get_relevant(str(full_message_history[-5:]), 10)
 
-            if debug:
+            if cfg.debug:
                 print('Memory Stats: ', permanent_memory.get_stats())
 
             next_message_to_add_index, current_tokens_used, insertion_index, current_context = generate_context(
@@ -110,7 +110,7 @@ def chat_with_ai(
             # assert tokens_remaining >= 0, "Tokens remaining is negative. This should never happen, please submit a bug report at https://www.github.com/Torantulino/Auto-GPT"
 
             # Debug print the current context
-            if debug:
+            if cfg.debug:
                 print(f"Token limit: {token_limit}")
                 print(f"Send Token Count: {current_tokens_used}")
                 print(f"Tokens remaining for response: {tokens_remaining}")

scripts/commands.py
@@ -25,6 +25,7 @@ def is_valid_int(value):
         return False
 
 def get_command(response):
+    """Parse the response and return the command name and arguments"""
     try:
         response_json = fix_and_parse_json(response)
 
@@ -53,6 +54,7 @@ def get_command(response):
 
 
 def execute_command(command_name, arguments):
+    """Execute the command and return the result"""
     memory = get_memory(cfg)
 
     try:
@@ -118,11 +120,13 @@ def execute_command(command_name, arguments):
 
 
 def get_datetime():
+    """Return the current date and time"""
     return "Current date and time: " + \
         datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
 
 
 def google_search(query, num_results=8):
+    """Return the results of a google search"""
     search_results = []
     for j in ddg(query, max_results=num_results):
         search_results.append(j)
@@ -130,6 +134,7 @@ def google_search(query, num_results=8):
     return json.dumps(search_results, ensure_ascii=False, indent=4)
 
 def google_official_search(query, num_results=8):
+    """Return the results of a google search using the official Google API"""
     from googleapiclient.discovery import build
     from googleapiclient.errors import HttpError
     import json
@@ -165,6 +170,7 @@ def google_official_search(query, num_results=8):
     return search_results_links
 
 def browse_website(url, question):
+    """Browse a website and return the summary and links"""
     summary = get_text_summary(url, question)
     links = get_hyperlinks(url)
 
@@ -178,23 +184,27 @@ def browse_website(url, question):
 
 
 def get_text_summary(url, question):
+    """Return the results of a google search"""
     text = browse.scrape_text(url)
     summary = browse.summarize_text(text, question)
     return """ "Result" : """ + summary
 
 
 def get_hyperlinks(url):
+    """Return the results of a google search"""
     link_list = browse.scrape_links(url)
     return link_list
 
 
 def commit_memory(string):
+    """Commit a string to memory"""
     _text = f"""Committing memory with string "{string}" """
     mem.permanent_memory.append(string)
     return _text
 
 
 def delete_memory(key):
+    """Delete a memory with a given key"""
     if key >= 0 and key < len(mem.permanent_memory):
         _text = "Deleting memory with key " + str(key)
         del mem.permanent_memory[key]
@@ -206,6 +216,7 @@ def delete_memory(key):
 
 
 def overwrite_memory(key, string):
+    """Overwrite a memory with a given key and string"""
     # Check if the key is a valid integer
     if is_valid_int(key):
         key_int = int(key)
@@ -232,11 +243,13 @@ def overwrite_memory(key, string):
 
 
 def shutdown():
+    """Shut down the program"""
     print("Shutting down...")
     quit()
 
 
 def start_agent(name, task, prompt, model=cfg.fast_llm_model):
+    """Start an agent with a given name, task, and prompt"""
     global cfg
 
     # Remove underscores from name
@@ -260,6 +273,7 @@ def start_agent(name, task, prompt, model=cfg.fast_llm_model):
 
 
 def message_agent(key, message):
+    """Message an agent with a given key and message"""
     global cfg
 
     # Check if the key is a valid integer
@@ -278,10 +292,12 @@ def message_agent(key, message):
 
 
 def list_agents():
+    """List all agents"""
     return agents.list_agents()
 
 
 def delete_agent(key):
+    """Delete an agent with a given key"""
     result = agents.delete_agent(key)
     if not result:
         return f"Agent {key} does not exist."

scripts/config.py
@@ -14,6 +14,7 @@ class Singleton(abc.ABCMeta, type):
     _instances = {}
 
     def __call__(cls, *args, **kwargs):
+        """Call method for the singleton metaclass."""
        if cls not in cls._instances:
             cls._instances[cls] = super(
                 Singleton, cls).__call__(
@@ -31,10 +32,11 @@ class Config(metaclass=Singleton):
     """
 
     def __init__(self):
         """Initialize the Config class"""
+        self.debug = False
         self.continuous_mode = False
         self.speak_mode = False
         # TODO - make these models be self-contained, using langchain, so we can configure them once and call it good
 
         self.fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo")
         self.smart_llm_model = os.getenv("SMART_LLM_MODEL", "gpt-4")
         self.fast_token_limit = int(os.getenv("FAST_TOKEN_LIMIT", 4000))
@@ -77,40 +79,56 @@ class Config(metaclass=Singleton):
         openai.api_key = self.openai_api_key
 
     def set_continuous_mode(self, value: bool):
         """Set the continuous mode value."""
         self.continuous_mode = value
 
     def set_speak_mode(self, value: bool):
         """Set the speak mode value."""
         self.speak_mode = value
 
+    def set_debug_mode(self, value: bool):
+        self.debug_mode = value
+
     def set_fast_llm_model(self, value: str):
         """Set the fast LLM model value."""
         self.fast_llm_model = value
 
     def set_smart_llm_model(self, value: str):
         """Set the smart LLM model value."""
         self.smart_llm_model = value
 
     def set_fast_token_limit(self, value: int):
         """Set the fast token limit value."""
         self.fast_token_limit = value
 
     def set_smart_token_limit(self, value: int):
         """Set the smart token limit value."""
         self.smart_token_limit = value
 
     def set_openai_api_key(self, value: str):
         """Set the OpenAI API key value."""
         self.openai_api_key = value
 
     def set_elevenlabs_api_key(self, value: str):
         """Set the ElevenLabs API key value."""
         self.elevenlabs_api_key = value
 
     def set_google_api_key(self, value: str):
         """Set the Google API key value."""
         self.google_api_key = value
 
     def set_custom_search_engine_id(self, value: str):
         """Set the custom search engine id value."""
         self.custom_search_engine_id = value
 
+    def set_pinecone_api_key(self, value: str):
+        """Set the Pinecone API key value."""
+        self.pinecone_api_key = value
+
+    def set_pinecone_region(self, value: str):
+        """Set the Pinecone region value."""
+        self.pinecone_region = value
+
+    def set_debug_mode(self, value: bool):
+        """Set the debug mode value."""
+        self.debug = value

scripts/data.py
@@ -2,6 +2,7 @@ import os
 from pathlib import Path
 
 def load_prompt():
+    """Load the prompt from data/prompt.txt"""
     try:
         # get directory of this file:
         file_dir = Path(__file__).parent

scripts/data/prompt.txt
@@ -24,7 +24,7 @@ COMMANDS:
 18. Execute Python File: "execute_python_file", args: "file": "<file>"
 19. Task Complete (Shutdown): "task_complete", args: "reason": "<reason>"
 20. Generate Image: "generate_image", args: "prompt": "<prompt>"
-21. Do Nothing; command name: "do_nothing", args: ""
+21. Do Nothing: "do_nothing", args: ""
 
 RESOURCES:
 

scripts/execute_code.py
@@ -3,6 +3,7 @@ import os
 
 
 def execute_python_file(file):
+    """Execute a Python file in a Docker container and return the output"""
     workspace_folder = "auto_gpt_workspace"
 
     print (f"Executing file '{file}' in workspace '{workspace_folder}'")

scripts/file_operations.py
@@ -4,11 +4,13 @@ import os.path
 # Set a dedicated folder for file I/O
 working_directory = "auto_gpt_workspace"
 
 # Create the directory if it doesn't exist
 if not os.path.exists(working_directory):
     os.makedirs(working_directory)
 
+
 def safe_join(base, *paths):
+    """Join one or more path components intelligently."""
     new_path = os.path.join(base, *paths)
     norm_new_path = os.path.normpath(new_path)
 
@@ -19,6 +21,7 @@ def safe_join(base, *paths):
 
 
 def read_file(filename):
+    """Read a file and return the contents"""
     try:
         filepath = safe_join(working_directory, filename)
         with open(filepath, "r") as f:
@@ -29,6 +32,7 @@ def read_file(filename):
 
 
 def write_to_file(filename, text):
+    """Write text to a file"""
     try:
         filepath = safe_join(working_directory, filename)
         directory = os.path.dirname(filepath)
@@ -42,6 +46,7 @@ def write_to_file(filename, text):
 
 
 def append_to_file(filename, text):
+    """Append text to a file"""
     try:
         filepath = safe_join(working_directory, filename)
         with open(filepath, "a") as f:
@@ -52,6 +57,7 @@ def append_to_file(filename, text):
 
 
 def delete_file(filename):
+    """Delete a file"""
     try:
         filepath = safe_join(working_directory, filename)
         os.remove(filepath)

scripts/json_parser.py
@@ -30,6 +30,7 @@ def fix_and_parse_json(
         json_str: str,
         try_to_fix_with_gpt: bool = True
 ) -> Union[str, Dict[Any, Any]]:
+    """Fix and parse JSON string"""
     try:
         json_str = json_str.replace('\t', '')
         return json.loads(json_str)
@@ -59,7 +60,8 @@ def fix_and_parse_json(
                 " your prompt is confusing the AI. Try changing it up"
                 " slightly.")
             # Now try to fix this up using the ai_functions
-            ai_fixed_json = fix_json(json_str, JSON_SCHEMA, cfg.debug)
+            ai_fixed_json = fix_json(json_str, JSON_SCHEMA)
+
             if ai_fixed_json != "failed":
                 return json.loads(ai_fixed_json)
             else:
@@ -71,13 +73,15 @@ def fix_and_parse_json(
             raise e
 
 
-def fix_json(json_str: str, schema: str, debug=False) -> str:
+def fix_json(json_str: str, schema: str) -> str:
     """Fix the given JSON string to make it parseable and fully complient with the provided schema."""
+
     # Try to fix the JSON using gpt:
     function_string = "def fix_json(json_str: str, schema:str=None) -> str:"
     args = [f"'''{json_str}'''", f"'''{schema}'''"]
     description_string = "Fixes the provided JSON string to make it parseable"\
         " and fully complient with the provided schema.\n If an object or"\
-        " field specifed in the schema isn't contained within the correct"\
+        " field specified in the schema isn't contained within the correct"\
         " JSON, it is ommited.\n This function is brilliant at guessing"\
         " when the format is incorrect."
 
@@ -87,12 +91,13 @@ def fix_json(json_str: str, schema: str, debug=False) -> str:
     result_string = call_ai_function(
         function_string, args, description_string, model=cfg.fast_llm_model
     )
-    if debug:
+    if cfg.debug:
         print("------------ JSON FIX ATTEMPT ---------------")
         print(f"Original JSON: {json_str}")
         print("-----------")
         print(f"Fixed JSON: {result_string}")
         print("----------- END OF FIX ATTEMPT ----------------")
+
     try:
         json.loads(result_string)  # just check the validity
         return result_string

scripts/llm_utils.py
@@ -6,6 +6,7 @@ openai.api_key = cfg.openai_api_key
 
 # Overly simple abstraction until we create something better
 def create_chat_completion(messages, model=None, temperature=None, max_tokens=None)->str:
+    """Create a chat completion using the OpenAI API"""
     if cfg.use_azure:
         response = openai.ChatCompletion.create(
             deployment_id=cfg.openai_deployment_id,

scripts/main.py
@@ -17,6 +17,18 @@ import traceback
 import yaml
 import argparse
 
+cfg = Config()
+
+def check_openai_api_key():
+    """Check if the OpenAI API key is set in config.py or as an environment variable."""
+    if not cfg.openai_api_key:
+        print(
+            Fore.RED +
+            "Please set your OpenAI API key in config.py or as an environment variable."
+        )
+        print("You can get your key from https://beta.openai.com/account/api-keys")
+        exit(1)
+
 
 def print_to_console(
         title,
@@ -25,6 +37,7 @@ def print_to_console(
         speak_text=False,
         min_typing_speed=0.05,
         max_typing_speed=0.01):
+    """Prints text to the console with a typing effect"""
     global cfg
     if speak_text and cfg.speak_mode:
         speak.say_text(f"{title}. {content}")
@@ -70,6 +83,7 @@ def attempt_to_fix_json_by_finding_outermost_brackets(json_string):
     return json_string
 
 def print_assistant_thoughts(assistant_reply):
+    """Prints the assistant's thoughts to the console"""
     global ai_name
     global cfg
     try:
@@ -137,7 +151,7 @@ def print_assistant_thoughts(assistant_reply):
 
 
 def load_variables(config_file="config.yaml"):
-    # Load variables from yaml file if it exists
+    """Load variables from yaml file if it exists, otherwise prompt the user for input"""
     try:
         with open(config_file) as file:
             config = yaml.load(file, Loader=yaml.FullLoader)
@@ -191,6 +205,7 @@ def load_variables(config_file="config.yaml"):
 
 
 def construct_prompt():
+    """Construct the prompt for the AI to respond to"""
     config = AIConfig.load()
     if config.ai_name:
         print_to_console(
@@ -219,6 +234,7 @@ Continue (y/n): """)
 
 
 def prompt_user():
+    """Prompt the user for input"""
     ai_name = ""
     # Construct the prompt
     print_to_console(
@@ -271,6 +287,7 @@ def prompt_user():
     return config
 
 def parse_arguments():
+    """Parses the arguments passed to the script"""
     global cfg
     cfg.set_continuous_mode(False)
     cfg.set_speak_mode(False)
@@ -294,6 +311,10 @@ def parse_arguments():
         print_to_console("Speak Mode: ", Fore.GREEN, "ENABLED")
         cfg.set_speak_mode(True)
 
+    if args.debug:
+        print_to_console("Debug Mode: ", Fore.GREEN, "ENABLED")
+        cfg.set_debug_mode(True)
+
     if args.gpt3only:
         print_to_console("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
         cfg.set_smart_llm_model(cfg.fast_llm_model)
@@ -304,7 +325,7 @@ def parse_arguments():
 
 
 # TODO: fill in llm values here
-
+check_openai_api_key()
 cfg = Config()
 parse_arguments()
 ai_name = ""

scripts/speak.py
@@ -15,6 +15,7 @@ tts_headers = {
 }
 
 def eleven_labs_speech(text, voice_index=0):
+    """Speak text using elevenlabs.io's API"""
     tts_url = "https://api.elevenlabs.io/v1/text-to-speech/{voice_id}".format(
         voice_id=voices[voice_index])
     formatted_message = {"text": text}

scripts/spinner.py
@@ -5,7 +5,9 @@ import time
 
 
 class Spinner:
+    """A simple spinner class"""
     def __init__(self, message="Loading...", delay=0.1):
+        """Initialize the spinner class"""
         self.spinner = itertools.cycle(['-', '/', '|', '\\'])
         self.delay = delay
         self.message = message
@@ -13,6 +15,7 @@ class Spinner:
         self.spinner_thread = None
 
     def spin(self):
+        """Spin the spinner"""
         while self.running:
             sys.stdout.write(next(self.spinner) + " " + self.message + "\r")
             sys.stdout.flush()
@@ -20,11 +23,13 @@ class Spinner:
             sys.stdout.write('\b' * (len(self.message) + 2))
 
     def __enter__(self):
+        """Start the spinner"""
         self.running = True
         self.spinner_thread = threading.Thread(target=self.spin)
         self.spinner_thread.start()
 
     def __exit__(self, exc_type, exc_value, exc_traceback):
+        """Stop the spinner"""
         self.running = False
         self.spinner_thread.join()
         sys.stdout.write('\r' + ' ' * (len(self.message) + 2) + '\r')