resolved latest conflicts
commit 5592dbd277
.env.template

@@ -26,4 +26,4 @@ WEAVIATE_USERNAME=
 WEAVIATE_PASSWORD=
 WEAVIATE_API_KEY=
 MEMORY_INDEX="auto-gpt"
-MEMORY_BACKEND="local"
+MEMORY_BACKEND=local
GitHub Actions CI workflow

@@ -1,4 +1,4 @@
-name: Unit Tests
+name: Python CI
 
 on:
   push:

@@ -30,6 +30,10 @@ jobs:
         python -m pip install --upgrade pip
         pip install -r requirements.txt
 
+    - name: Lint with flake8
+      continue-on-error: true
+      run: flake8 scripts/ tests/
+
     - name: Run unittest tests with coverage
       run: |
         coverage run --source=scripts -m unittest discover tests
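The added lint step runs flake8 over `scripts/` and `tests/` without failing the build (`continue-on-error: true`), while the existing coverage step discovers tests with unittest. For reference, a minimal sketch of a test module that the discovery command would collect; the file name and test are hypothetical, not part of this commit:

```python
# tests/test_smoke.py -- hypothetical example; any test_*.py under tests/
# is collected by `coverage run --source=scripts -m unittest discover tests`
import unittest


class TestSmoke(unittest.TestCase):
    def test_truth(self):
        # trivially passing assertion, just to exercise discovery
        self.assertTrue(True)


if __name__ == "__main__":
    unittest.main()
```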
.gitignore

@@ -18,4 +18,4 @@ log.txt
 # Coverage reports
 .coverage
 coverage.xml
-htmlcov/
+htmlcov/
Dockerfile (12 changes)

@@ -1,9 +1,7 @@
-FROM python:3.11
-
+FROM python:3.11-slim
+ENV PIP_NO_CACHE_DIR=yes
 WORKDIR /app
-COPY scripts/ /app
-COPY requirements.txt /app
-
+COPY requirements.txt .
 RUN pip install -r requirements.txt
-
-CMD ["python", "main.py"]
+COPY scripts/ .
+ENTRYPOINT ["python", "main.py"]
README.md (36 changes)

@@ -96,9 +96,10 @@ pip install -r requirements.txt
 ```
 
 4. Rename `.env.template` to `.env` and fill in your `OPENAI_API_KEY`. If you plan to use Speech Mode, fill in your `ELEVEN_LABS_API_KEY` as well.
-- Obtain your OpenAI API key from: https://platform.openai.com/account/api-keys.
-- Obtain your ElevenLabs API key from: https://elevenlabs.io. You can view your xi-api-key using the "Profile" tab on the website.
-- If you want to use GPT on an Azure instance, set `USE_AZURE` to `True` and provide the `OPENAI_AZURE_API_BASE`, `OPENAI_AZURE_API_VERSION` and `OPENAI_AZURE_DEPLOYMENT_ID` values as explained here: https://pypi.org/project/openai/ in the `Microsoft Azure Endpoints` section. Additionally you need separate deployments for both embeddings and chat. Add their ID values to `OPENAI_AZURE_CHAT_DEPLOYMENT_ID` and `OPENAI_AZURE_EMBEDDINGS_DEPLOYMENT_ID` respectively
+  - Obtain your OpenAI API key from: https://platform.openai.com/account/api-keys.
+  - Obtain your ElevenLabs API key from: https://elevenlabs.io. You can view your xi-api-key using the "Profile" tab on the website.
+  - If you want to use GPT on an Azure instance, set `USE_AZURE` to `True` and provide the `OPENAI_AZURE_API_BASE`, `OPENAI_AZURE_API_VERSION` and `OPENAI_AZURE_DEPLOYMENT_ID` values as explained here: https://pypi.org/project/openai/ in the `Microsoft Azure Endpoints` section. Additionally you need separate deployments for both embeddings and chat. Add their ID values to `OPENAI_AZURE_CHAT_DEPLOYMENT_ID` and `OPENAI_AZURE_EMBEDDINGS_DEPLOYMENT_ID` respectively
+
 
 ## 🔧 Usage
 

@@ -112,13 +113,22 @@ python scripts/main.py
 2. After each of AUTO-GPT's actions, type "NEXT COMMAND" to authorise them to continue.
 3. To exit the program, type "exit" and press Enter.
 
+### Logs
+
+You will find activity and error logs in the folder `./logs`
+
+To output debug logs:
+
+```
+python scripts/main.py --debug
+```
+
 ## 🗣️ Speech Mode
 
 Use this to use TTS for Auto-GPT
 
 ```
 python scripts/main.py --speak
 ```
 
 ## 🔍 Google API Keys Configuration
 

@@ -245,6 +255,16 @@ USE_WEAVIATE_EMBEDDED=False # set to True to run Embedded Weaviate
 MEMORY_INDEX="Autogpt" # name of the index to create for the application
 ```
 
+## Setting Your Cache Type
+
+By default Auto-GPT is going to use LocalCache instead of redis or Pinecone.
+
+To switch to either, change the `MEMORY_BACKEND` env variable to the value that you want:
+
+`local` (default) uses a local JSON cache file
+`pinecone` uses the Pinecone.io account you configured in your ENV settings
+`redis` will use the redis cache that you configured
+
 ## View Memory Usage
 
 1. View memory usage by using the `--debug` flag :)

@@ -335,3 +355,11 @@ To run tests and see coverage, run the following command:
 ```
 coverage run -m unittest discover tests
 ```
+
+## Run linter
+
+This project uses [flake8](https://flake8.pycqa.org/en/latest/) for linting. To run the linter, run the following command:
+
+```
+flake8 scripts/ tests/
+```
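The new "Setting Your Cache Type" section documents the `MEMORY_BACKEND` switch added in `.env.template`. A small sketch of how such a value typically reaches the process through python-dotenv, which the scripts already import; the snippet is illustrative, not code from this commit:

```python
import os

from dotenv import load_dotenv  # python-dotenv, already a project dependency

load_dotenv()  # reads key=value pairs from .env into the environment
backend = os.getenv("MEMORY_BACKEND", "local")  # 'local', 'pinecone', or 'redis'
print(f"Using memory backend: {backend}")
```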
requirements.txt

@@ -17,3 +17,4 @@ orjson
 Pillow
 weaviate-client==3.15.5
 coverage
+flake8
scripts/chat.py

@@ -4,6 +4,8 @@ from dotenv import load_dotenv
 from config import Config
 import token_counter
 from llm_utils import create_chat_completion
+from logger import logger
+import logging
 
 cfg = Config()
 

@@ -64,15 +66,12 @@ def chat_with_ai(
         model = cfg.fast_llm_model  # TODO: Change model from hardcode to argument
         # Reserve 1000 tokens for the response
 
-        if cfg.debug_mode:
-            print(f"Token limit: {token_limit}")
-
+        logger.debug(f"Token limit: {token_limit}")
         send_token_limit = token_limit - 1000
 
         relevant_memory = permanent_memory.get_relevant(str(full_message_history[-9:]), 10)
 
-        if cfg.debug_mode:
-            print('Memory Stats: ', permanent_memory.get_stats())
+        logger.debug(f'Memory Stats: {permanent_memory.get_stats()}')
 
         next_message_to_add_index, current_tokens_used, insertion_index, current_context = generate_context(
             prompt, relevant_memory, full_message_history, model)

@@ -110,19 +109,17 @@ def chat_with_ai(
         # assert tokens_remaining >= 0, "Tokens remaining is negative. This should never happen, please submit a bug report at https://www.github.com/Torantulino/Auto-GPT"
 
         # Debug print the current context
-        if cfg.debug_mode:
-            print(f"Token limit: {token_limit}")
-            print(f"Send Token Count: {current_tokens_used}")
-            print(f"Tokens remaining for response: {tokens_remaining}")
-            print("------------ CONTEXT SENT TO AI ---------------")
-            for message in current_context:
-                # Skip printing the prompt
-                if message["role"] == "system" and message["content"] == prompt:
-                    continue
-                print(
-                    f"{message['role'].capitalize()}: {message['content']}")
-                print()
-            print("----------- END OF CONTEXT ----------------")
+        logger.debug(f"Token limit: {token_limit}")
+        logger.debug(f"Send Token Count: {current_tokens_used}")
+        logger.debug(f"Tokens remaining for response: {tokens_remaining}")
+        logger.debug("------------ CONTEXT SENT TO AI ---------------")
+        for message in current_context:
+            # Skip printing the prompt
+            if message["role"] == "system" and message["content"] == prompt:
+                continue
+            logger.debug(f"{message['role'].capitalize()}: {message['content']}")
+            logger.debug("")
+        logger.debug("----------- END OF CONTEXT ----------------")
 
         # TODO: use a model defined elsewhere, so that model can contain temperature and other settings we care about
         assistant_reply = create_chat_completion(
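The pattern in these chat.py hunks replaces `if cfg.debug_mode: print(...)` guards with unconditional `logger.debug(...)` calls; the gating moves into the log level that main.py sets once at startup. A standalone sketch of that control flow using only the stdlib `logging` module (names are illustrative):

```python
import logging

logging.basicConfig(format="%(levelname)s %(message)s")
logger = logging.getLogger("sketch")

debug_mode = False  # stand-in for cfg.debug_mode
logger.setLevel(logging.DEBUG if debug_mode else logging.INFO)

# No `if debug_mode:` guard is needed at the call site; the record is
# simply filtered out when the effective level is INFO.
logger.debug("Token limit: %s", 4000)
logger.info("This line is always shown")
```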
scripts/json_parser.py

@@ -3,6 +3,7 @@ from typing import Any, Dict, Union
 from call_ai_function import call_ai_function
 from config import Config
 from json_utils import correct_json
+from logger import logger
 
 cfg = Config()
 

@@ -56,7 +57,7 @@ def fix_and_parse_json(
         # Can throw a ValueError if there is no "{" or "}" in the json_str
     except (json.JSONDecodeError, ValueError) as e:  # noqa: F841
         if try_to_fix_with_gpt:
-            print("Warning: Failed to parse AI output, attempting to fix."
+            logger.warn("Warning: Failed to parse AI output, attempting to fix."
                   "\n If you see this warning frequently, it's likely that"
                   " your prompt is confusing the AI. Try changing it up"
                   " slightly.")

@@ -68,7 +69,7 @@ def fix_and_parse_json(
         else:
             # This allows the AI to react to the error message,
             # which usually results in it correcting its ways.
-            print("Failed to fix AI output, telling the AI.")
+            logger.error("Failed to fix AI output, telling the AI.")
             return json_str
     else:
         raise e

@@ -76,7 +77,6 @@ def fix_and_parse_json(
 
 def fix_json(json_str: str, schema: str) -> str:
     """Fix the given JSON string to make it parseable and fully compliant with the provided schema."""
-
     # Try to fix the JSON using GPT:
     function_string = "def fix_json(json_str: str, schema:str=None) -> str:"
     args = [f"'''{json_str}'''", f"'''{schema}'''"]

@@ -92,12 +92,11 @@ def fix_json(json_str: str, schema: str) -> str:
     result_string = call_ai_function(
         function_string, args, description_string, model=cfg.fast_llm_model
     )
-    if cfg.debug_mode:
-        print("------------ JSON FIX ATTEMPT ---------------")
-        print(f"Original JSON: {json_str}")
-        print("-----------")
-        print(f"Fixed JSON: {result_string}")
-        print("----------- END OF FIX ATTEMPT ----------------")
+    logger.debug("------------ JSON FIX ATTEMPT ---------------")
+    logger.debug(f"Original JSON: {json_str}")
+    logger.debug("-----------")
+    logger.debug(f"Fixed JSON: {result_string}")
+    logger.debug("----------- END OF FIX ATTEMPT ----------------")
 
     try:
         json.loads(result_string)  # just check the validity
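For context, `fix_and_parse_json` first tries `json.loads`, then falls through to the GPT-backed `fix_json` repair path when `try_to_fix_with_gpt` is set. A hypothetical call, assuming the signature visible in the hunks above; the import path and sample payload are assumptions, not code from this commit:

```python
# assumes the scripts/ directory is on sys.path, as tests/context.py arranges
from json_parser import fix_and_parse_json

broken_reply = '{"thoughts": {"text": "hello"}'  # missing a closing brace
result = fix_and_parse_json(broken_reply, try_to_fix_with_gpt=True)
print(result)  # ideally the parsed dict; the raw string if repair failed
```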
scripts/logger.py (new file)

@@ -0,0 +1,186 @@
+import logging
+import os
+import random
+import re
+import time
+from logging import LogRecord
+
+from colorama import Fore
+from colorama import Style
+
+import speak
+from config import Config
+from config import Singleton
+
+cfg = Config()
+
+'''
+Logger that handle titles in different colors.
+Outputs logs in console, activity.log, and errors.log
+For console handler: simulates typing
+'''
+
+
+class Logger(metaclass=Singleton):
+    def __init__(self):
+        # create log directory if it doesn't exist
+        log_dir = os.path.join('..', 'logs')
+        if not os.path.exists(log_dir):
+            os.makedirs(log_dir)
+
+        log_file = "activity.log"
+        error_file = "error.log"
+
+        console_formatter = AutoGptFormatter('%(title_color)s %(message)s')
+
+        # Create a handler for console which simulate typing
+        self.typing_console_handler = TypingConsoleHandler()
+        self.typing_console_handler.setLevel(logging.INFO)
+        self.typing_console_handler.setFormatter(console_formatter)
+
+        # Create a handler for console without typing simulation
+        self.console_handler = ConsoleHandler()
+        self.console_handler.setLevel(logging.DEBUG)
+        self.console_handler.setFormatter(console_formatter)
+
+        # Info handler in activity.log
+        self.file_handler = logging.FileHandler(os.path.join(log_dir, log_file))
+        self.file_handler.setLevel(logging.DEBUG)
+        info_formatter = AutoGptFormatter('%(asctime)s %(levelname)s %(title)s %(message_no_color)s')
+        self.file_handler.setFormatter(info_formatter)
+
+        # Error handler error.log
+        error_handler = logging.FileHandler(os.path.join(log_dir, error_file))
+        error_handler.setLevel(logging.ERROR)
+        error_formatter = AutoGptFormatter(
+            '%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s %(message_no_color)s')
+        error_handler.setFormatter(error_formatter)
+
+        self.typing_logger = logging.getLogger('TYPER')
+        self.typing_logger.addHandler(self.typing_console_handler)
+        self.typing_logger.addHandler(self.file_handler)
+        self.typing_logger.addHandler(error_handler)
+        self.typing_logger.setLevel(logging.DEBUG)
+
+        self.logger = logging.getLogger('LOGGER')
+        self.logger.addHandler(self.console_handler)
+        self.logger.addHandler(self.file_handler)
+        self.logger.addHandler(error_handler)
+        self.logger.setLevel(logging.DEBUG)
+
+    def typewriter_log(
+            self,
+            title='',
+            title_color='',
+            content='',
+            speak_text=False,
+            level=logging.INFO):
+        if speak_text and cfg.speak_mode:
+            speak.say_text(f"{title}. {content}")
+
+        if content:
+            if isinstance(content, list):
+                content = " ".join(content)
+        else:
+            content = ""
+
+        self.typing_logger.log(level, content, extra={'title': title, 'color': title_color})
+
+    def debug(
+            self,
+            message,
+            title='',
+            title_color='',
+    ):
+        self._log(title, title_color, message, logging.DEBUG)
+
+    def warn(
+            self,
+            message,
+            title='',
+            title_color='',
+    ):
+        self._log(title, title_color, message, logging.WARN)
+
+    def error(
+            self,
+            title,
+            message=''
+    ):
+        self._log(title, Fore.RED, message, logging.ERROR)
+
+    def _log(
+            self,
+            title='',
+            title_color='',
+            message='',
+            level=logging.INFO):
+        if message:
+            if isinstance(message, list):
+                message = " ".join(message)
+        self.logger.log(level, message, extra={'title': title, 'color': title_color})
+
+    def set_level(self, level):
+        self.logger.setLevel(level)
+        self.typing_logger.setLevel(level)
+
+
+'''
+Output stream to console using simulated typing
+'''
+
+
+class TypingConsoleHandler(logging.StreamHandler):
+    def emit(self, record):
+        min_typing_speed = 0.05
+        max_typing_speed = 0.01
+
+        msg = self.format(record)
+        try:
+            words = msg.split()
+            for i, word in enumerate(words):
+                print(word, end="", flush=True)
+                if i < len(words) - 1:
+                    print(" ", end="", flush=True)
+                typing_speed = random.uniform(min_typing_speed, max_typing_speed)
+                time.sleep(typing_speed)
+                # type faster after each word
+                min_typing_speed = min_typing_speed * 0.95
+                max_typing_speed = max_typing_speed * 0.95
+            print()
+        except Exception:
+            self.handleError(record)
+
+
+class ConsoleHandler(logging.StreamHandler):
+    def emit(self, record):
+        msg = self.format(record)
+        try:
+            print(msg)
+        except Exception:
+            self.handleError(record)
+
+
+'''
+Allows to handle custom placeholders 'title_color' and 'message_no_color'.
+To use this formatter, make sure to pass 'color', 'title' as log extras.
+'''
+
+
+class AutoGptFormatter(logging.Formatter):
+    def format(self, record: LogRecord) -> str:
+        if (hasattr(record, 'color')):
+            record.title_color = getattr(record, 'color') + getattr(record, 'title') + " " + Style.RESET_ALL
+        else:
+            record.title_color = getattr(record, 'title')
+        if hasattr(record, 'msg'):
+            record.message_no_color = remove_color_codes(getattr(record, 'msg'))
+        else:
+            record.message_no_color = ''
+        return super().format(record)
+
+
+def remove_color_codes(s: str) -> str:
+    ansi_escape = re.compile(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])')
+    return ansi_escape.sub('', s)
+
+
+logger = Logger()
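Call sites elsewhere in this commit exercise the new singleton like this; the calls below are assembled from the main.py, chat.py, and token_counter.py hunks rather than invented:

```python
from colorama import Fore

from logger import logger

# Typed console output, also mirrored to logs/activity.log
logger.typewriter_log("SYSTEM: ", Fore.YELLOW, "Unable to execute command")

# Plain handlers; DEBUG records reach the console only after
# logger.set_level(logging.DEBUG), as main.py does for --debug
logger.debug("Token limit: 4000")
logger.warn("Warning: model not found. Using cl100k_base encoding.")
logger.error("Error: ", "Invalid JSON")
```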
scripts/main.py (122 changes)
@@ -2,7 +2,7 @@ import json
 import random
 import commands as cmd
 import utils
-from memory import get_memory
+from memory import get_memory, get_supported_memory_backends
 import data
 import chat
 from colorama import Fore, Style

@@ -15,18 +15,11 @@ from ai_config import AIConfig
 import traceback
 import yaml
 import argparse
+from logger import logger
+import logging
 
 cfg = Config()
 
-def configure_logging():
-    logging.basicConfig(filename='log.txt',
-                        filemode='a',
-                        format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
-                        datefmt='%H:%M:%S',
-                        level=logging.DEBUG)
-    return logging.getLogger('AutoGPT')
-
-
 def check_openai_api_key():
     """Check if the OpenAI API key is set in config.py or as an environment variable."""
     if not cfg.openai_api_key:

@@ -37,39 +30,10 @@ def check_openai_api_key():
         print("You can get your key from https://beta.openai.com/account/api-keys")
         exit(1)
 
-def print_to_console(
-        title,
-        title_color,
-        content,
-        speak_text=False,
-        min_typing_speed=0.05,
-        max_typing_speed=0.01):
-    """Prints text to the console with a typing effect"""
-    global cfg
-    global logger
-    if speak_text and cfg.speak_mode:
-        speak.say_text(f"{title}. {content}")
-    print(title_color + title + " " + Style.RESET_ALL, end="")
-    if content:
-        logger.info(title + ': ' + content)
-        if isinstance(content, list):
-            content = " ".join(content)
-        words = content.split()
-        for i, word in enumerate(words):
-            print(word, end="", flush=True)
-            if i < len(words) - 1:
-                print(" ", end="", flush=True)
-            typing_speed = random.uniform(min_typing_speed, max_typing_speed)
-            time.sleep(typing_speed)
-            # type faster after each word
-            min_typing_speed = min_typing_speed * 0.95
-            max_typing_speed = max_typing_speed * 0.95
-        print()
-
 def attempt_to_fix_json_by_finding_outermost_brackets(json_string):
     if cfg.speak_mode and cfg.debug_mode:
         speak.say_text("I have received an invalid JSON response from the OpenAI API. Trying to fix it now.")
-    print_to_console("Attempting to fix JSON by finding outermost brackets\n", Fore.RED, "")
+    logger.typewriter_log("Attempting to fix JSON by finding outermost brackets\n")
 
     try:
         # Use regex to search for JSON objects

@@ -80,7 +44,7 @@ def attempt_to_fix_json_by_finding_outermost_brackets(json_string):
         if json_match:
             # Extract the valid JSON object from the string
             json_string = json_match.group(0)
-            print_to_console("Apparently json was fixed.", Fore.GREEN,"")
+            logger.typewriter_log(title="Apparently json was fixed.", title_color=Fore.GREEN)
             if cfg.speak_mode and cfg.debug_mode:
                 speak.say_text("Apparently json was fixed.")
         else:

@@ -89,7 +53,7 @@ def attempt_to_fix_json_by_finding_outermost_brackets(json_string):
     except (json.JSONDecodeError, ValueError) as e:
         if cfg.speak_mode:
             speak.say_text("Didn't work. I will have to ignore this response then.")
-        print_to_console("Error: Invalid JSON, setting it to empty JSON now.\n", Fore.RED, "")
+        logger.error("Error: Invalid JSON, setting it to empty JSON now.\n")
         json_string = {}
 
     return json_string

@@ -103,7 +67,7 @@ def print_assistant_thoughts(assistant_reply):
         # Parse and print Assistant response
         assistant_reply_json = fix_and_parse_json(assistant_reply)
     except json.JSONDecodeError as e:
-        print_to_console("Error: Invalid JSON in assistant thoughts\n", Fore.RED, assistant_reply)
+        logger.error("Error: Invalid JSON in assistant thoughts\n", assistant_reply)
         assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply)
         assistant_reply_json = fix_and_parse_json(assistant_reply_json)
 

@@ -112,7 +76,7 @@ def print_assistant_thoughts(assistant_reply):
         try:
             assistant_reply_json = json.loads(assistant_reply_json)
         except json.JSONDecodeError as e:
-            print_to_console("Error: Invalid JSON in assistant thoughts\n", Fore.RED, assistant_reply)
+            logger.error("Error: Invalid JSON\n", assistant_reply)
             assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply_json)
 
         assistant_thoughts_reasoning = None

@@ -128,11 +92,11 @@ def print_assistant_thoughts(assistant_reply):
         assistant_thoughts_criticism = assistant_thoughts.get("criticism")
         assistant_thoughts_speak = assistant_thoughts.get("speak")
 
-        print_to_console(f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, assistant_thoughts_text)
-        print_to_console("REASONING:", Fore.YELLOW, assistant_thoughts_reasoning)
+        logger.typewriter_log(f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, assistant_thoughts_text)
+        logger.typewriter_log("REASONING:", Fore.YELLOW, assistant_thoughts_reasoning)
 
         if assistant_thoughts_plan:
-            print_to_console("PLAN:", Fore.YELLOW, "")
+            logger.typewriter_log("PLAN:", Fore.YELLOW, "")
             # If it's a list, join it into a string
             if isinstance(assistant_thoughts_plan, list):
                 assistant_thoughts_plan = "\n".join(assistant_thoughts_plan)

@@ -143,23 +107,23 @@ def print_assistant_thoughts(assistant_reply):
             lines = assistant_thoughts_plan.split('\n')
             for line in lines:
                 line = line.lstrip("- ")
-                print_to_console("- ", Fore.GREEN, line.strip())
+                logger.typewriter_log("- ", Fore.GREEN, line.strip())
 
-        print_to_console("CRITICISM:", Fore.YELLOW, assistant_thoughts_criticism)
+        logger.typewriter_log("CRITICISM:", Fore.YELLOW, assistant_thoughts_criticism)
         # Speak the assistant's thoughts
         if cfg.speak_mode and assistant_thoughts_speak:
             speak.say_text(assistant_thoughts_speak)
 
         return assistant_reply_json
     except json.decoder.JSONDecodeError as e:
-        print_to_console("Error: Invalid JSON\n", Fore.RED, assistant_reply)
+        logger.error("Error: Invalid JSON\n", assistant_reply)
        if cfg.speak_mode:
             speak.say_text("I have received an invalid JSON response from the OpenAI API. I cannot ignore this response.")
 
     # All other errors, return "Error: + error message"
     except Exception as e:
         call_stack = traceback.format_exc()
-        print_to_console("Error: \n", Fore.RED, call_stack)
+        logger.error("Error: \n", call_stack)

@@ -220,7 +184,7 @@ def construct_prompt():
     """Construct the prompt for the AI to respond to"""
     config = AIConfig.load()
     if config.ai_name:
-        print_to_console(
+        logger.typewriter_log(
             f"Welcome back! ",
             Fore.GREEN,
             f"Would you like me to return to being {config.ai_name}?",

@@ -249,14 +213,14 @@ def prompt_user():
     """Prompt the user for input"""
     ai_name = ""
     # Construct the prompt
-    print_to_console(
+    logger.typewriter_log(
         "Welcome to Auto-GPT! ",
         Fore.GREEN,
         "Enter the name of your AI and its role below. Entering nothing will load defaults.",
         speak_text=True)
 
     # Get AI Name from User
-    print_to_console(
+    logger.typewriter_log(
         "Name your AI: ",
         Fore.GREEN,
         "For example, 'Entrepreneur-GPT'")

@@ -264,14 +228,14 @@ def prompt_user():
     if ai_name == "":
         ai_name = "Entrepreneur-GPT"
 
-    print_to_console(
+    logger.typewriter_log(
         f"{ai_name} here!",
         Fore.LIGHTBLUE_EX,
         "I am at your service.",
         speak_text=True)
 
     # Get AI Role from User
-    print_to_console(
+    logger.typewriter_log(
         "Describe your AI's role: ",
         Fore.GREEN,
         "For example, 'an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth.'")

@@ -280,7 +244,7 @@ def prompt_user():
         ai_role = "an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth."
 
     # Enter up to 5 goals for the AI
-    print_to_console(
+    logger.typewriter_log(
         "Enter up to 5 goals for your AI: ",
         Fore.GREEN,
         "For example: \nIncrease net worth, Grow Twitter Account, Develop and manage multiple businesses autonomously'")

@@ -311,38 +275,52 @@ def parse_arguments():
     parser.add_argument('--debug', action='store_true', help='Enable Debug Mode')
     parser.add_argument('--gpt3only', action='store_true', help='Enable GPT3.5 Only Mode')
     parser.add_argument('--gpt4only', action='store_true', help='Enable GPT4 Only Mode')
+    parser.add_argument('--use-memory', '-m', dest="memory_type", help='Defines which Memory backend to use')
     args = parser.parse_args()
 
+    if args.debug:
+        logger.typewriter_log("Debug Mode: ", Fore.GREEN, "ENABLED")
+        cfg.set_debug_mode(True)
+
     if args.continuous:
-        print_to_console("Continuous Mode: ", Fore.RED, "ENABLED")
-        print_to_console(
+        logger.typewriter_log("Continuous Mode: ", Fore.RED, "ENABLED")
+        logger.typewriter_log(
             "WARNING: ",
             Fore.RED,
             "Continuous mode is not recommended. It is potentially dangerous and may cause your AI to run forever or carry out actions you would not usually authorise. Use at your own risk.")
         cfg.set_continuous_mode(True)
 
     if args.speak:
-        print_to_console("Speak Mode: ", Fore.GREEN, "ENABLED")
+        logger.typewriter_log("Speak Mode: ", Fore.GREEN, "ENABLED")
         cfg.set_speak_mode(True)
 
     if args.gpt3only:
-        print_to_console("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
+        logger.typewriter_log("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
         cfg.set_smart_llm_model(cfg.fast_llm_model)
 
     if args.gpt4only:
-        print_to_console("GPT4 Only Mode: ", Fore.GREEN, "ENABLED")
+        logger.typewriter_log("GPT4 Only Mode: ", Fore.GREEN, "ENABLED")
         cfg.set_fast_llm_model(cfg.smart_llm_model)
 
     if args.debug:
-        print_to_console("Debug Mode: ", Fore.GREEN, "ENABLED")
+        logger.typewriter_log("Debug Mode: ", Fore.GREEN, "ENABLED")
         cfg.set_debug_mode(True)
 
+    if args.memory_type:
+        supported_memory = get_supported_memory_backends()
+        chosen = args.memory_type
+        if not chosen in supported_memory:
+            print_to_console("ONLY THE FOLLOWING MEMORY BACKENDS ARE SUPPORTED: ", Fore.RED, f'{supported_memory}')
+            print_to_console(f"Defaulting to: ", Fore.YELLOW, cfg.memory_backend)
+        else:
+            cfg.memory_backend = chosen
+
 
 # TODO: fill in llm values here
 check_openai_api_key()
 cfg = Config()
-logger = configure_logging()
 parse_arguments()
+logger.set_level(logging.DEBUG if cfg.debug_mode else logging.INFO)
 ai_name = ""
 prompt = construct_prompt()
 # print(prompt)

@@ -378,14 +356,14 @@ while True:
         if cfg.speak_mode:
             speak.say_text(f"I want to execute {command_name}")
     except Exception as e:
-        print_to_console("Error: \n", Fore.RED, str(e))
+        logger.error("Error: \n", str(e))
 
     if not cfg.continuous_mode and next_action_count == 0:
         ### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
         # Get key press: Prompt the user to press enter to continue or escape
         # to exit
         user_input = ""
-        print_to_console(
+        logger.typewriter_log(
             "NEXT ACTION: ",
             Fore.CYAN,
             f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}")

@@ -394,7 +372,7 @@ while True:
             flush=True)
         while True:
             console_input = utils.clean_input(Fore.MAGENTA + "Input:" + Style.RESET_ALL)
-            if console_input.lower() == "y":
+            if console_input.lower().rstrip() == "y":
                 user_input = "GENERATE NEXT COMMAND JSON"
                 break
             elif console_input.lower().startswith("y -"):

@@ -414,7 +392,7 @@ while True:
                 break
 
         if user_input == "GENERATE NEXT COMMAND JSON":
-            print_to_console(
+            logger.typewriter_log(
                 "-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=",
                 Fore.MAGENTA,
                 "")

@@ -423,7 +401,7 @@ while True:
             break
     else:
         # Print command
-        print_to_console(
+        logger.typewriter_log(
             "NEXT ACTION: ",
             Fore.CYAN,
             f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}")

@@ -448,9 +426,9 @@ while True:
     # history
     if result is not None:
         full_message_history.append(chat.create_chat_message("system", result))
-        print_to_console("SYSTEM: ", Fore.YELLOW, result)
+        logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result)
     else:
         full_message_history.append(
             chat.create_chat_message(
                 "system", "Unable to execute command"))
-        print_to_console("SYSTEM: ", Fore.YELLOW, "Unable to execute command")
+        logger.typewriter_log("SYSTEM: ", Fore.YELLOW, "Unable to execute command")
scripts/memory/__init__.py

@@ -1,12 +1,19 @@
 from memory.local import LocalCache
 
+# List of supported memory backends
+# Add a backend to this list if the import attempt is successful
+supported_memory = ['local']
+
 try:
     from memory.redismem import RedisMemory
+    supported_memory.append('redis')
 except ImportError:
     print("Redis not installed. Skipping import.")
     RedisMemory = None
 
 try:
     from memory.pinecone import PineconeMemory
+    supported_memory.append('pinecone')
 except ImportError:
     print("Pinecone not installed. Skipping import.")
     PineconeMemory = None

@@ -46,6 +53,8 @@ def get_memory(cfg, init=False):
         memory.clear()
     return memory
 
+def get_supported_memory_backends():
+    return supported_memory
 
 __all__ = [
     "get_memory",
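With `supported_memory` built up at import time, callers can check availability before switching backends, which is what the new `--use-memory` flag in main.py does. A short sketch of the same idea, assuming `cfg` is the usual `Config` instance and `get_memory` is the factory shown in the second hunk:

```python
from config import Config
from memory import get_memory, get_supported_memory_backends

cfg = Config()
if "redis" in get_supported_memory_backends():
    cfg.memory_backend = "redis"  # otherwise keep the configured default

memory = get_memory(cfg, init=True)  # init=True clears the chosen index first
```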
scripts/token_counter.py

@@ -15,7 +15,7 @@ def count_message_tokens(messages : List[Dict[str, str]], model : str = "gpt-3.5
     try:
         encoding = tiktoken.encoding_for_model(model)
     except KeyError:
-        print("Warning: model not found. Using cl100k_base encoding.")
+        logger.warn("Warning: model not found. Using cl100k_base encoding.")
         encoding = tiktoken.get_encoding("cl100k_base")
     if model == "gpt-3.5-turbo":
         # !Node: gpt-3.5-turbo may change over time. Returning num tokens assuming gpt-3.5-turbo-0301.")
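The line this hunk touches is the standard tiktoken fallback: `encoding_for_model` raises `KeyError` for model names it does not know, and `cl100k_base` is the encoding used by the gpt-3.5/gpt-4 families. A standalone sketch:

```python
import tiktoken


def encoding_for(model: str) -> tiktoken.Encoding:
    try:
        return tiktoken.encoding_for_model(model)
    except KeyError:
        # unknown model name; fall back as token_counter.py does
        return tiktoken.get_encoding("cl100k_base")


print(len(encoding_for("not-a-real-model").encode("hello world")))
```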
tests/context.py

@@ -2,4 +2,4 @@ import sys
 import os
 
 sys.path.insert(0, os.path.abspath(
-    os.path.join(os.path.dirname(__file__), '../scripts')))
+    os.path.join(os.path.dirname(__file__), '../scripts')))