feat(global): error logs are logged at debug level and activated with the program argument '--debug'

pull/981/head
Fabrice Hong 2023-04-08 01:05:08 +02:00
parent 0e004f5c14
commit 43c006d71c
5 changed files with 148 additions and 26 deletions
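
In short: the scattered `print`-based debug output is routed through a new shared `logger` at DEBUG level, and `--debug` lowers the log threshold at startup. The recurring pattern across the touched files (excerpted from the diff below):
```
# Before: debug output via bare print calls
if cfg.debug_mode:
    print(f"Token limit: {token_limit}")

# After: routed through the shared logger at DEBUG level
if cfg.debug_mode:
    logger.log(content=f"Token limit: {token_limit}", level=logging.DEBUG)
```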

README.md

@@ -112,13 +112,20 @@ python scripts/main.py
 2. After each of AUTO-GPT's actions, type "NEXT COMMAND" to authorise them to continue.
 3. To exit the program, type "exit" and press Enter.
+
+### Logs
+You will find activity and error logs in the folder ```./logs```
+To output debug logs:
+```
+python scripts/main.py --debug
+```
 ## 🗣️ Speech Mode
 Use this to use TTS for Auto-GPT
 ```
 python scripts/main.py --speak
 ```
 ## 🔍 Google API Keys Configuration

scripts/chat.py

@@ -4,6 +4,8 @@ from dotenv import load_dotenv
 from config import Config
 import token_counter
 from llm_utils import create_chat_completion
+from logger import logger
+import logging

 cfg = Config()
@@ -64,15 +66,12 @@ def chat_with_ai(
         model = cfg.fast_llm_model # TODO: Change model from hardcode to argument
         # Reserve 1000 tokens for the response
         if cfg.debug_mode:
-            print(f"Token limit: {token_limit}")
+            logger.log(content=f"Token limit: {token_limit}", level=logging.DEBUG)
         send_token_limit = token_limit - 1000
         relevant_memory = permanent_memory.get_relevant(str(full_message_history[-9:]), 10)
         if cfg.debug_mode:
-            print('Memory Stats: ', permanent_memory.get_stats())
+            logger.log(content=f'Memory Stats: {permanent_memory.get_stats()}', level=logging.DEBUG)
         next_message_to_add_index, current_tokens_used, insertion_index, current_context = generate_context(
             prompt, relevant_memory, full_message_history, model)
@@ -110,19 +109,17 @@ def chat_with_ai(
         # assert tokens_remaining >= 0, "Tokens remaining is negative. This should never happen, please submit a bug report at https://www.github.com/Torantulino/Auto-GPT"
         # Debug print the current context
         if cfg.debug_mode:
-            print(f"Token limit: {token_limit}")
-            print(f"Send Token Count: {current_tokens_used}")
-            print(f"Tokens remaining for response: {tokens_remaining}")
-            print("------------ CONTEXT SENT TO AI ---------------")
-            for message in current_context:
-                # Skip printing the prompt
-                if message["role"] == "system" and message["content"] == prompt:
-                    continue
-                print(
-                    f"{message['role'].capitalize()}: {message['content']}")
-                print()
-            print("----------- END OF CONTEXT ----------------")
+            logger.log(content=f"Token limit: {token_limit}", level=logging.DEBUG)
+            logger.log(content=f"Send Token Count: {current_tokens_used}", level=logging.DEBUG)
+            logger.log(content=f"Tokens remaining for response: {tokens_remaining}", level=logging.DEBUG)
+            logger.log(content="------------ CONTEXT SENT TO AI ---------------", level=logging.DEBUG)
+            for message in current_context:
+                # Skip printing the prompt
+                if message["role"] == "system" and message["content"] == prompt:
+                    continue
+                logger.log(content=f"{message['role'].capitalize()}: {message['content']}", level=logging.DEBUG)
+                logger.log(content="", level=logging.DEBUG)
+            logger.log(content="----------- END OF CONTEXT ----------------", level=logging.DEBUG)
         # TODO: use a model defined elsewhere, so that model can contain temperature and other settings we care about
         assistant_reply = create_chat_completion(
scripts/json_parser.py

@@ -76,7 +76,6 @@ def fix_and_parse_json(

 def fix_json(json_str: str, schema: str) -> str:
     """Fix the given JSON string to make it parseable and fully compliant with the provided schema."""
     # Try to fix the JSON using GPT:
     function_string = "def fix_json(json_str: str, schema:str=None) -> str:"
     args = [f"'''{json_str}'''", f"'''{schema}'''"]
@@ -92,12 +91,11 @@ def fix_json(json_str: str, schema: str) -> str:
     result_string = call_ai_function(
         function_string, args, description_string, model=cfg.fast_llm_model
     )
     if cfg.debug_mode:
-        print("------------ JSON FIX ATTEMPT ---------------")
-        print(f"Original JSON: {json_str}")
-        print("-----------")
-        print(f"Fixed JSON: {result_string}")
-        print("----------- END OF FIX ATTEMPT ----------------")
+        logger.log(content="------------ JSON FIX ATTEMPT ---------------", level=logging.DEBUG)
+        logger.log(content=f"Original JSON: {json_str}", level=logging.DEBUG)
+        logger.log(content="-----------", level=logging.DEBUG)
+        logger.log(content=f"Fixed JSON: {result_string}", level=logging.DEBUG)
+        logger.log(content="----------- END OF FIX ATTEMPT ----------------", level=logging.DEBUG)
     try:
         json.loads(result_string)  # just check the validity

scripts/logger.py (new file, 115 lines)

@@ -0,0 +1,115 @@
import logging
import os
import random
import time
from logging import LogRecord
from colorama import Style

import speak
from config import Config
from config import Singleton
import re

cfg = Config()


class Logger(metaclass=Singleton):
    def __init__(self):
        # create log directory if it doesn't exist
        log_dir = os.path.join('..', 'logs')
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)

        log_file = "activity.log"
        error_file = "error.log"

        # Create a handler for INFO level logs
        self.console_handler = TypingConsoleHandler()
        self.console_handler.setLevel(logging.INFO)
        console_formatter = AutoGptFormatter('%(title_color)s %(message)s')
        self.console_handler.setFormatter(console_formatter)

        # Info handler in activity.log
        self.file_handler = logging.FileHandler(os.path.join(log_dir, log_file))
        self.file_handler.setLevel(logging.INFO)
        info_formatter = AutoGptFormatter('%(asctime)s %(levelname)s %(title)s %(message_no_color)s')
        self.file_handler.setFormatter(info_formatter)

        # Error handler error.log
        error_handler = logging.FileHandler(os.path.join(log_dir, error_file))
        error_handler.setLevel(logging.ERROR)
        error_formatter = AutoGptFormatter(
            '%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s %(message_no_color)s')
        error_handler.setFormatter(error_formatter)

        self.logger = logging.getLogger(__name__)
        self.logger.addHandler(self.console_handler)
        self.logger.addHandler(self.file_handler)
        self.logger.addHandler(error_handler)

    def log(
            self,
            title='',
            title_color='',
            content='',
            speak_text=False,
            level=logging.INFO):
        if speak_text and cfg.speak_mode:
            speak.say_text(f"{title}. {content}")

        if content:
            if isinstance(content, list):
                content = " ".join(content)
        else:
            content = ""

        self.logger.log(level, content, extra={'title': title, 'color': title_color})

    def set_level(self, level):
        self.logger.setLevel(level)
        self.console_handler.setLevel(level)
        self.file_handler.setLevel(level)


class TypingConsoleHandler(logging.StreamHandler):
    def emit(self, record):
        min_typing_speed = 0.05
        max_typing_speed = 0.01
        msg = self.format(record)
        try:
            words = msg.split()
            for i, word in enumerate(words):
                print(word, end="", flush=True)
                if i < len(words) - 1:
                    print(" ", end="", flush=True)
                typing_speed = random.uniform(min_typing_speed, max_typing_speed)
                time.sleep(typing_speed)
                # type faster after each word
                min_typing_speed = min_typing_speed * 0.95
                max_typing_speed = max_typing_speed * 0.95
            print()
        except Exception:
            self.handleError(record)


class AutoGptFormatter(logging.Formatter):
    def format(self, record: LogRecord) -> str:
        record.title_color = getattr(record, 'color') + getattr(record, 'title') + " " + Style.RESET_ALL
        if hasattr(record, 'msg'):
            record.message_no_color = remove_color_codes(getattr(record, 'msg'))
        else:
            record.message_no_color = ''
        return super().format(record)


def remove_color_codes(s: str) -> str:
    ansi_escape = re.compile(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])')
    return ansi_escape.sub('', s)


logger = Logger()
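
Not part of the commit, but as orientation for the new module: a minimal usage sketch of the singleton (the message text here is hypothetical):
```
import logging
from colorama import Fore
from logger import logger  # module-level singleton created at import time

# main.py calls this right after parse_arguments(); until then the
# underlying logging.Logger inherits the default root WARNING threshold.
logger.set_level(logging.INFO)

# INFO record with a colored title: typed out word by word on the console
# by TypingConsoleHandler and appended to the activity log.
logger.log("Thinking: ", Fore.CYAN, "evaluating next command")

# DEBUG records are filtered until the level is lowered (what --debug does).
logger.log(content="token budget recomputed", level=logging.DEBUG)  # dropped
logger.set_level(logging.DEBUG)
logger.log(content="token budget recomputed", level=logging.DEBUG)  # emitted
```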

scripts/main.py

@@ -313,6 +313,10 @@ def parse_arguments():
     parser.add_argument('--gpt4only', action='store_true', help='Enable GPT4 Only Mode')
     args = parser.parse_args()

+    if args.debug:
+        logger.log("Debug Mode: ", Fore.GREEN, "ENABLED")
+        cfg.set_debug_mode(True)
+
     if args.continuous:
         print_to_console("Continuous Mode: ", Fore.RED, "ENABLED")
         print_to_console(
@@ -343,6 +347,7 @@ check_openai_api_key()
 cfg = Config()
 logger = configure_logging()
 parse_arguments()
+logger.set_level(logging.DEBUG if cfg.debug_mode else logging.INFO)
 ai_name = ""
 prompt = construct_prompt()
 # print(prompt)
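
The net effect of the wiring above, again sketched with hypothetical messages: `--debug` lowers the console and activity-log thresholds via `set_level`, while the `error.log` handler keeps its own ERROR threshold either way:
```
import logging
from colorama import Fore
from logger import logger

# Equivalent of launching with: python scripts/main.py --debug
logger.set_level(logging.DEBUG)
logger.log(content="context sent to AI", level=logging.DEBUG)            # console + activity.log
logger.log("Error: ", Fore.RED, "API call failed", level=logging.ERROR)  # also lands in error.log

# Without --debug the threshold stays at INFO: DEBUG records are dropped,
# but ERROR records still reach error.log through its dedicated handler.
logger.set_level(logging.INFO)
logger.log(content="context sent to AI", level=logging.DEBUG)  # dropped
```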