From 10b7d8e376b168f76d176f357fb90f31255d0b88 Mon Sep 17 00:00:00 2001 From: crimson-knight Date: Tue, 11 Apr 2023 07:46:25 -0400 Subject: [PATCH 01/17] Adds information on how to use the other cache methods available --- .env.template | 1 + README.md | 9 +++++++++ 2 files changed, 10 insertions(+) diff --git a/.env.template b/.env.template index 01735615c..1ff98a6fd 100644 --- a/.env.template +++ b/.env.template @@ -13,3 +13,4 @@ OPENAI_AZURE_DEPLOYMENT_ID=deployment-id-for-azure IMAGE_PROVIDER=dalle HUGGINGFACE_API_TOKEN= USE_MAC_OS_TTS=False +MEMORY_BACKEND=local diff --git a/README.md b/README.md index 749c87915..aa083689a 100644 --- a/README.md +++ b/README.md @@ -204,6 +204,15 @@ export PINECONE_ENV="Your pinecone region" # something like: us-east4-gcp ``` +## Setting Your Cache Type + +By default Auto-GPT is going to use LocalCache instead of redis or Pinecone. + +To switch to either, change the `MEMORY_BACKEND` env variable to the value that you want: + +`local` (default) uses a local JSON cache file +`pinecone` uses the Pinecone.io account you configured in your ENV settings +`redis` will use the redis cache that you configured ## View Memory Usage From 739b0ed96bb5e72d47730359f9a277a2402673b4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kiss=20P=C3=A9ter?= Date: Tue, 11 Apr 2023 14:04:37 +0200 Subject: [PATCH 02/17] Improve Dockerfile: - Use smaller base image - Make it smaller by not saving cache (1,15GB -> 356MB) --- Dockerfile | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/Dockerfile b/Dockerfile index 146a37471..4d264c88c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,9 +1,7 @@ -FROM python:3.11 - +FROM python:3.11-slim +ENV PIP_NO_CACHE_DIR=yes WORKDIR /app -COPY scripts/ /app -COPY requirements.txt /app - +COPY requirements.txt . RUN pip install -r requirements.txt - -CMD ["python", "main.py"] +COPY scripts/ . 
+ENTRYPOINT ["python", "main.py"] From dbb78b636f8c91a651bfbea5ebdd06e4df6cae55 Mon Sep 17 00:00:00 2001 From: PierreBastiani Date: Tue, 11 Apr 2023 14:57:29 +0100 Subject: [PATCH 03/17] check for authorise 'y' without trailing space --- scripts/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/main.py b/scripts/main.py index d84e15085..87a8f4fab 100644 --- a/scripts/main.py +++ b/scripts/main.py @@ -348,7 +348,7 @@ while True: flush=True) while True: console_input = utils.clean_input(Fore.MAGENTA + "Input:" + Style.RESET_ALL) - if console_input.lower() == "y": + if console_input.lower().rstrip() == "y": user_input = "GENERATE NEXT COMMAND JSON" break elif console_input.lower().startswith("y -"): From 083ccb6bd36957f69c40a3103986ab37457920a1 Mon Sep 17 00:00:00 2001 From: Eesa Hamza Date: Wed, 12 Apr 2023 16:58:39 +0300 Subject: [PATCH 04/17] Added a memory backend argument --- scripts/main.py | 12 +++++++++++- scripts/memory/__init__.py | 8 +++++++- 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/scripts/main.py b/scripts/main.py index 15af0c381..c5963635b 100644 --- a/scripts/main.py +++ b/scripts/main.py @@ -2,7 +2,7 @@ import json import random import commands as cmd import utils -from memory import get_memory +from memory import get_memory, get_supported_memory_backends import data import chat from colorama import Fore, Style @@ -276,6 +276,7 @@ def parse_arguments(): parser.add_argument('--debug', action='store_true', help='Enable Debug Mode') parser.add_argument('--gpt3only', action='store_true', help='Enable GPT3.5 Only Mode') parser.add_argument('--gpt4only', action='store_true', help='Enable GPT4 Only Mode') + parser.add_argument('--use-memory', '-m', dest="memory_type", help='Defines which Memory backend to use') args = parser.parse_args() if args.continuous: @@ -302,6 +303,15 @@ def parse_arguments(): print_to_console("Debug Mode: ", Fore.GREEN, "ENABLED") cfg.set_debug_mode(True) + if args.memory_type: + 
supported_memory = get_supported_memory_backends() + chosen = args.memory_type + if not chosen in supported_memory: + print_to_console("ONLY THE FOLLOWING MEMORY BACKENDS ARE SUPPORTED: ", Fore.RED, f'{supported_memory}') + print_to_console(f"Defaulting to: ", Fore.YELLOW, cfg.memory_backend) + else: + cfg.memory_backend = chosen + # TODO: fill in llm values here check_openai_api_key() diff --git a/scripts/memory/__init__.py b/scripts/memory/__init__.py index a441a46aa..f4ba5206d 100644 --- a/scripts/memory/__init__.py +++ b/scripts/memory/__init__.py @@ -1,17 +1,21 @@ from memory.local import LocalCache + +supported_memory = ['local'] + try: from memory.redismem import RedisMemory + supported_memory.append('redis') except ImportError: print("Redis not installed. Skipping import.") RedisMemory = None try: from memory.pinecone import PineconeMemory + supported_memory.append('pinecone') except ImportError: print("Pinecone not installed. Skipping import.") PineconeMemory = None - def get_memory(cfg, init=False): memory = None if cfg.memory_backend == "pinecone": @@ -35,6 +39,8 @@ def get_memory(cfg, init=False): memory.clear() return memory +def get_supported_memory_backends(): + return supported_memory __all__ = [ "get_memory", From ec6918ab48626311c9d3cc096e5d8d111f690b52 Mon Sep 17 00:00:00 2001 From: Eesa Hamza Date: Wed, 12 Apr 2023 16:59:50 +0300 Subject: [PATCH 05/17] Added some comments --- scripts/memory/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/scripts/memory/__init__.py b/scripts/memory/__init__.py index f4ba5206d..2900353ed 100644 --- a/scripts/memory/__init__.py +++ b/scripts/memory/__init__.py @@ -1,5 +1,7 @@ from memory.local import LocalCache +# List of supported memory backends +# Add a backend to this list if the import attempt is successful supported_memory = ['local'] try: From 43c006d71cb40ec7c58e27210a5f2bc474f9e0b7 Mon Sep 17 00:00:00 2001 From: Fabrice Hong Date: Sat, 8 Apr 2023 01:05:08 +0200 Subject: [PATCH 06/17] 
feat(global): errors logs are logged as debug level and activated with program argument 'debug' --- README.md | 9 +++- scripts/chat.py | 33 ++++++------ scripts/json_parser.py | 12 ++--- scripts/logger.py | 115 +++++++++++++++++++++++++++++++++++++++++ scripts/main.py | 5 ++ 5 files changed, 148 insertions(+), 26 deletions(-) create mode 100644 scripts/logger.py diff --git a/README.md b/README.md index 5507a53c4..dc3386f58 100644 --- a/README.md +++ b/README.md @@ -112,13 +112,20 @@ python scripts/main.py 2. After each of AUTO-GPT's actions, type "NEXT COMMAND" to authorise them to continue. 3. To exit the program, type "exit" and press Enter. +### Logs +You will find activity and error logs in the folder ```./logs``` + +To output debug logs: +``` +python scripts/main.py --debug +``` + ## 🗣️ Speech Mode Use this to use TTS for Auto-GPT ``` python scripts/main.py --speak - ``` ## 🔍 Google API Keys Configuration diff --git a/scripts/chat.py b/scripts/chat.py index 2f76e8e21..8d5245141 100644 --- a/scripts/chat.py +++ b/scripts/chat.py @@ -4,6 +4,8 @@ from dotenv import load_dotenv from config import Config import token_counter from llm_utils import create_chat_completion +from logger import logger +import logging cfg = Config() @@ -64,15 +66,12 @@ def chat_with_ai( model = cfg.fast_llm_model # TODO: Change model from hardcode to argument # Reserve 1000 tokens for the response - if cfg.debug_mode: - print(f"Token limit: {token_limit}") - + logger.log(content=f"Token limit: {token_limit}", level=logging.DEBUG) send_token_limit = token_limit - 1000 relevant_memory = permanent_memory.get_relevant(str(full_message_history[-9:]), 10) - if cfg.debug_mode: - print('Memory Stats: ', permanent_memory.get_stats()) + logger.log(content=f'Memory Stats: {permanent_memory.get_stats()}', level=logging.DEBUG) next_message_to_add_index, current_tokens_used, insertion_index, current_context = generate_context( prompt, relevant_memory, full_message_history, model) @@ -110,19 +109,17 @@ 
def chat_with_ai( # assert tokens_remaining >= 0, "Tokens remaining is negative. This should never happen, please submit a bug report at https://www.github.com/Torantulino/Auto-GPT" # Debug print the current context - if cfg.debug_mode: - print(f"Token limit: {token_limit}") - print(f"Send Token Count: {current_tokens_used}") - print(f"Tokens remaining for response: {tokens_remaining}") - print("------------ CONTEXT SENT TO AI ---------------") - for message in current_context: - # Skip printing the prompt - if message["role"] == "system" and message["content"] == prompt: - continue - print( - f"{message['role'].capitalize()}: {message['content']}") - print() - print("----------- END OF CONTEXT ----------------") + logger.log(content=f"Token limit: {token_limit}", level=logging.DEBUG) + logger.log(content=f"Send Token Count: {current_tokens_used}", level=logging.DEBUG) + logger.log(content=f"Tokens remaining for response: {tokens_remaining}", level=logging.DEBUG) + logger.log(content="------------ CONTEXT SENT TO AI ---------------", level=logging.DEBUG) + for message in current_context: + # Skip printing the prompt + if message["role"] == "system" and message["content"] == prompt: + continue + logger.log(content=f"{message['role'].capitalize()}: {message['content']}", level=logging.DEBUG) + logger.log(content="", level=logging.DEBUG) + logger.log(content="----------- END OF CONTEXT ----------------", level=logging.DEBUG) # TODO: use a model defined elsewhere, so that model can contain temperature and other settings we care about assistant_reply = create_chat_completion( diff --git a/scripts/json_parser.py b/scripts/json_parser.py index 552442555..c70e66f40 100644 --- a/scripts/json_parser.py +++ b/scripts/json_parser.py @@ -76,7 +76,6 @@ def fix_and_parse_json( def fix_json(json_str: str, schema: str) -> str: """Fix the given JSON string to make it parseable and fully compliant with the provided schema.""" - # Try to fix the JSON using GPT: function_string = "def 
fix_json(json_str: str, schema:str=None) -> str:" args = [f"'''{json_str}'''", f"'''{schema}'''"] @@ -92,12 +91,11 @@ def fix_json(json_str: str, schema: str) -> str: result_string = call_ai_function( function_string, args, description_string, model=cfg.fast_llm_model ) - if cfg.debug_mode: - print("------------ JSON FIX ATTEMPT ---------------") - print(f"Original JSON: {json_str}") - print("-----------") - print(f"Fixed JSON: {result_string}") - print("----------- END OF FIX ATTEMPT ----------------") + logger.log(content="------------ JSON FIX ATTEMPT ---------------", level=logging.DEBUG) + logger.log(content=f"Original JSON: {json_str}", level=logging.DEBUG) + logger.log(content="-----------", level=logging.DEBUG) + logger.log(content=f"Fixed JSON: {result_string}", level=logging.DEBUG) + logger.log(content="----------- END OF FIX ATTEMPT ----------------", level=logging.DEBUG) try: json.loads(result_string) # just check the validity diff --git a/scripts/logger.py b/scripts/logger.py new file mode 100644 index 000000000..9347ca626 --- /dev/null +++ b/scripts/logger.py @@ -0,0 +1,115 @@ +import logging +import os +import random +import time +from logging import LogRecord + +from colorama import Style + +import speak +from config import Config +from config import Singleton + +import re + +cfg = Config() + + +class Logger(metaclass=Singleton): + def __init__(self): + # create log directory if it doesn't exist + log_dir = os.path.join('..', 'logs') + if not os.path.exists(log_dir): + os.makedirs(log_dir) + + log_file = "activity.log" + error_file = "error.log" + + # Create a handler for INFO level logs + self.console_handler = TypingConsoleHandler() + self.console_handler.setLevel(logging.INFO) + console_formatter = AutoGptFormatter('%(title_color)s %(message)s') + self.console_handler.setFormatter(console_formatter) + + # Info handler in activity.log + self.file_handler = logging.FileHandler(os.path.join(log_dir, log_file)) + 
self.file_handler.setLevel(logging.INFO) + info_formatter = AutoGptFormatter('%(asctime)s %(levelname)s %(title)s %(message_no_color)s') + self.file_handler.setFormatter(info_formatter) + + # Error handler error.log + error_handler = logging.FileHandler(os.path.join(log_dir, error_file)) + error_handler.setLevel(logging.ERROR) + error_formatter = AutoGptFormatter( + '%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s %(message_no_color)s') + error_handler.setFormatter(error_formatter) + + self.logger = logging.getLogger(__name__) + self.logger.addHandler(self.console_handler) + self.logger.addHandler(self.file_handler) + self.logger.addHandler(error_handler) + + def log( + self, + title='', + title_color='', + content='', + speak_text=False, + level=logging.INFO): + if speak_text and cfg.speak_mode: + speak.say_text(f"{title}. {content}") + + if content: + if isinstance(content, list): + content = " ".join(content) + else: + content = "" + + self.logger.log(level, content, extra={'title': title, 'color': title_color}) + + def set_level(self, level): + self.logger.setLevel(level) + self.console_handler.setLevel(level) + self.file_handler.setLevel(level) + + +class TypingConsoleHandler(logging.StreamHandler): + def emit(self, record): + min_typing_speed = 0.05 + max_typing_speed = 0.01 + + msg = self.format(record) + try: + words = msg.split() + for i, word in enumerate(words): + print(word, end="", flush=True) + if i < len(words) - 1: + print(" ", end="", flush=True) + typing_speed = random.uniform(min_typing_speed, max_typing_speed) + time.sleep(typing_speed) + # type faster after each word + min_typing_speed = min_typing_speed * 0.95 + max_typing_speed = max_typing_speed * 0.95 + print() + except Exception: + self.handleError(record) + + + + +class AutoGptFormatter(logging.Formatter): + def format(self, record: LogRecord) -> str: + record.title_color = getattr(record, 'color') + getattr(record, 'title') + " " + Style.RESET_ALL + if 
hasattr(record, 'msg'): + record.message_no_color = remove_color_codes(getattr(record, 'msg')) + else: + record.message_no_color = '' + return super().format(record) + + +def remove_color_codes(s: str) -> str: + ansi_escape = re.compile(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])') + return ansi_escape.sub('', s) + + +logger = Logger() diff --git a/scripts/main.py b/scripts/main.py index 7a48d0e2f..647ca7882 100644 --- a/scripts/main.py +++ b/scripts/main.py @@ -313,6 +313,10 @@ def parse_arguments(): parser.add_argument('--gpt4only', action='store_true', help='Enable GPT4 Only Mode') args = parser.parse_args() + if args.debug: + logger.log("Debug Mode: ", Fore.GREEN, "ENABLED") + cfg.set_debug_mode(True) + if args.continuous: print_to_console("Continuous Mode: ", Fore.RED, "ENABLED") print_to_console( @@ -343,6 +347,7 @@ check_openai_api_key() cfg = Config() logger = configure_logging() parse_arguments() +logger.set_level(logging.DEBUG if cfg.debug_mode else logging.INFO) ai_name = "" prompt = construct_prompt() # print(prompt) From 1f5049a9c434087865040e6d5014194cf6c3e7da Mon Sep 17 00:00:00 2001 From: Fabrice Hong Date: Sat, 8 Apr 2023 01:05:20 +0200 Subject: [PATCH 07/17] doc(logger): added class documentation --- scripts/logger.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/scripts/logger.py b/scripts/logger.py index 9347ca626..4990ef8eb 100644 --- a/scripts/logger.py +++ b/scripts/logger.py @@ -14,6 +14,12 @@ import re cfg = Config() +''' +Logger that handle titles in different colors. 
+Outputs logs in console, activity.log, and errors.log +For console handler: simulates typing +''' + class Logger(metaclass=Singleton): def __init__(self): @@ -73,6 +79,11 @@ class Logger(metaclass=Singleton): self.file_handler.setLevel(level) +''' +Output stream to console using simulated typing +''' + + class TypingConsoleHandler(logging.StreamHandler): def emit(self, record): min_typing_speed = 0.05 @@ -95,6 +106,10 @@ class TypingConsoleHandler(logging.StreamHandler): self.handleError(record) +''' +Allows to handle custom placeholders 'title_color' and 'message_no_color'. +To use this formatter, make sure to pass 'color', 'title' as log extras. +''' class AutoGptFormatter(logging.Formatter): From 862d44ea39c88f4128c838e884fa310802c6efec Mon Sep 17 00:00:00 2001 From: Fabrice Hong Date: Wed, 12 Apr 2023 14:39:54 +0200 Subject: [PATCH 08/17] fix(logger): fix typewriter simulation doesn't work well with Typing spinner --- scripts/chat.py | 18 +++---- scripts/json_parser.py | 15 +++--- scripts/logger.py | 76 +++++++++++++++++++++++++---- scripts/main.py | 103 +++++++++++++-------------------------- scripts/token_counter.py | 2 +- 5 files changed, 117 insertions(+), 97 deletions(-) diff --git a/scripts/chat.py b/scripts/chat.py index 8d5245141..e16cee383 100644 --- a/scripts/chat.py +++ b/scripts/chat.py @@ -66,12 +66,12 @@ def chat_with_ai( model = cfg.fast_llm_model # TODO: Change model from hardcode to argument # Reserve 1000 tokens for the response - logger.log(content=f"Token limit: {token_limit}", level=logging.DEBUG) + logger.debug(f"Token limit: {token_limit}") send_token_limit = token_limit - 1000 relevant_memory = permanent_memory.get_relevant(str(full_message_history[-9:]), 10) - logger.log(content=f'Memory Stats: {permanent_memory.get_stats()}', level=logging.DEBUG) + logger.debug(f'Memory Stats: {permanent_memory.get_stats()}') next_message_to_add_index, current_tokens_used, insertion_index, current_context = generate_context( prompt, relevant_memory, 
full_message_history, model) @@ -109,17 +109,17 @@ def chat_with_ai( # assert tokens_remaining >= 0, "Tokens remaining is negative. This should never happen, please submit a bug report at https://www.github.com/Torantulino/Auto-GPT" # Debug print the current context - logger.log(content=f"Token limit: {token_limit}", level=logging.DEBUG) - logger.log(content=f"Send Token Count: {current_tokens_used}", level=logging.DEBUG) - logger.log(content=f"Tokens remaining for response: {tokens_remaining}", level=logging.DEBUG) - logger.log(content="------------ CONTEXT SENT TO AI ---------------", level=logging.DEBUG) + logger.debug(f"Token limit: {token_limit}") + logger.debug(f"Send Token Count: {current_tokens_used}") + logger.debug(f"Tokens remaining for response: {tokens_remaining}") + logger.debug("------------ CONTEXT SENT TO AI ---------------") for message in current_context: # Skip printing the prompt if message["role"] == "system" and message["content"] == prompt: continue - logger.log(content=f"{message['role'].capitalize()}: {message['content']}", level=logging.DEBUG) - logger.log(content="", level=logging.DEBUG) - logger.log(content="----------- END OF CONTEXT ----------------", level=logging.DEBUG) + logger.debug(f"{message['role'].capitalize()}: {message['content']}") + logger.debug("") + logger.debug("----------- END OF CONTEXT ----------------") # TODO: use a model defined elsewhere, so that model can contain temperature and other settings we care about assistant_reply = create_chat_completion( diff --git a/scripts/json_parser.py b/scripts/json_parser.py index c70e66f40..29995629a 100644 --- a/scripts/json_parser.py +++ b/scripts/json_parser.py @@ -3,6 +3,7 @@ from typing import Any, Dict, Union from call_ai_function import call_ai_function from config import Config from json_utils import correct_json +from logger import logger cfg = Config() @@ -56,7 +57,7 @@ def fix_and_parse_json( # Can throw a ValueError if there is no "{" or "}" in the json_str except 
(json.JSONDecodeError, ValueError) as e: # noqa: F841 if try_to_fix_with_gpt: - print("Warning: Failed to parse AI output, attempting to fix." + logger.warn("Warning: Failed to parse AI output, attempting to fix." "\n If you see this warning frequently, it's likely that" " your prompt is confusing the AI. Try changing it up" " slightly.") @@ -68,7 +69,7 @@ def fix_and_parse_json( else: # This allows the AI to react to the error message, # which usually results in it correcting its ways. - print("Failed to fix AI output, telling the AI.") + logger.error("Failed to fix AI output, telling the AI.") return json_str else: raise e @@ -91,11 +92,11 @@ def fix_json(json_str: str, schema: str) -> str: result_string = call_ai_function( function_string, args, description_string, model=cfg.fast_llm_model ) - logger.log(content="------------ JSON FIX ATTEMPT ---------------", level=logging.DEBUG) - logger.log(content=f"Original JSON: {json_str}", level=logging.DEBUG) - logger.log(content="-----------", level=logging.DEBUG) - logger.log(content=f"Fixed JSON: {result_string}", level=logging.DEBUG) - logger.log(content="----------- END OF FIX ATTEMPT ----------------", level=logging.DEBUG) + logger.debug("------------ JSON FIX ATTEMPT ---------------") + logger.debug(f"Original JSON: {json_str}") + logger.debug("-----------") + logger.debug(f"Fixed JSON: {result_string}") + logger.debug("----------- END OF FIX ATTEMPT ----------------") try: json.loads(result_string) # just check the validity diff --git a/scripts/logger.py b/scripts/logger.py index 4990ef8eb..c6d995e0b 100644 --- a/scripts/logger.py +++ b/scripts/logger.py @@ -1,8 +1,10 @@ import logging import os import random +import re import time from logging import LogRecord +from colorama import Fore from colorama import Style @@ -10,8 +12,6 @@ import speak from config import Config from config import Singleton -import re - cfg = Config() ''' @@ -31,10 +31,16 @@ class Logger(metaclass=Singleton): log_file = "activity.log" 
error_file = "error.log" - # Create a handler for INFO level logs - self.console_handler = TypingConsoleHandler() - self.console_handler.setLevel(logging.INFO) console_formatter = AutoGptFormatter('%(title_color)s %(message)s') + + # Create a handler for console which simulate typing + self.typing_console_handler = TypingConsoleHandler() + self.typing_console_handler.setLevel(logging.INFO) + self.typing_console_handler.setFormatter(console_formatter) + + # Create a handler for console without typing simulation + self.console_handler = ConsoleHandler() + self.console_handler.setLevel(logging.DEBUG) self.console_handler.setFormatter(console_formatter) # Info handler in activity.log @@ -50,10 +56,17 @@ class Logger(metaclass=Singleton): '%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s %(message_no_color)s') error_handler.setFormatter(error_formatter) - self.logger = logging.getLogger(__name__) + self.typing_logger = logging.getLogger('TYPER') + self.typing_logger.addHandler(self.typing_console_handler) + self.typing_logger.addHandler(self.file_handler) + self.typing_logger.addHandler(error_handler) + self.typing_logger.setLevel(logging.DEBUG) + + self.logger = logging.getLogger('LOGGER') self.logger.addHandler(self.console_handler) self.logger.addHandler(self.file_handler) self.logger.addHandler(error_handler) + self.logger.setLevel(logging.DEBUG) def log( self, @@ -71,12 +84,45 @@ class Logger(metaclass=Singleton): else: content = "" - self.logger.log(level, content, extra={'title': title, 'color': title_color}) + self.typing_logger.log(level, content, extra={'title': title, 'color': title_color}) + + def debug( + self, + message, + title='', + title_color='', + ): + self._logs(title, title_color, message, logging.DEBUG) + + def warn( + self, + message, + title='', + title_color='', + ): + self._logs(title, title_color, message, logging.WARN) + + def error( + self, + title, + message='' + ): + self._logs(title, Fore.RED, message, logging.ERROR) 
+ + def _logs( + self, + title='', + title_color='', + message='', + level=logging.INFO): + if message: + if isinstance(message, list): + message = " ".join(message) + self.logger.log(level, message, extra={'title': title, 'color': title_color}) def set_level(self, level): self.logger.setLevel(level) - self.console_handler.setLevel(level) - self.file_handler.setLevel(level) + self.typing_logger.setLevel(level) ''' @@ -105,6 +151,13 @@ class TypingConsoleHandler(logging.StreamHandler): except Exception: self.handleError(record) +class ConsoleHandler(logging.StreamHandler): + def emit(self, record): + msg = self.format(record) + try: + print(msg) + except Exception: + self.handleError(record) ''' Allows to handle custom placeholders 'title_color' and 'message_no_color'. @@ -114,7 +167,10 @@ To use this formatter, make sure to pass 'color', 'title' as log extras. class AutoGptFormatter(logging.Formatter): def format(self, record: LogRecord) -> str: - record.title_color = getattr(record, 'color') + getattr(record, 'title') + " " + Style.RESET_ALL + if (hasattr(record, 'color')): + record.title_color = getattr(record, 'color') + getattr(record, 'title') + " " + Style.RESET_ALL + else: + record.title_color = getattr(record, 'title') if hasattr(record, 'msg'): record.message_no_color = remove_color_codes(getattr(record, 'msg')) else: diff --git a/scripts/main.py b/scripts/main.py index 647ca7882..33f8b71c3 100644 --- a/scripts/main.py +++ b/scripts/main.py @@ -15,18 +15,11 @@ from ai_config import AIConfig import traceback import yaml import argparse +from logger import logger import logging cfg = Config() -def configure_logging(): - logging.basicConfig(filename='log.txt', - filemode='a', - format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s', - datefmt='%H:%M:%S', - level=logging.DEBUG) - return logging.getLogger('AutoGPT') - def check_openai_api_key(): """Check if the OpenAI API key is set in config.py or as an environment variable.""" if not 
cfg.openai_api_key: @@ -37,39 +30,10 @@ def check_openai_api_key(): print("You can get your key from https://beta.openai.com/account/api-keys") exit(1) -def print_to_console( - title, - title_color, - content, - speak_text=False, - min_typing_speed=0.05, - max_typing_speed=0.01): - """Prints text to the console with a typing effect""" - global cfg - global logger - if speak_text and cfg.speak_mode: - speak.say_text(f"{title}. {content}") - print(title_color + title + " " + Style.RESET_ALL, end="") - if content: - logger.info(title + ': ' + content) - if isinstance(content, list): - content = " ".join(content) - words = content.split() - for i, word in enumerate(words): - print(word, end="", flush=True) - if i < len(words) - 1: - print(" ", end="", flush=True) - typing_speed = random.uniform(min_typing_speed, max_typing_speed) - time.sleep(typing_speed) - # type faster after each word - min_typing_speed = min_typing_speed * 0.95 - max_typing_speed = max_typing_speed * 0.95 - print() - def attempt_to_fix_json_by_finding_outermost_brackets(json_string): if cfg.speak_mode and cfg.debug_mode: speak.say_text("I have received an invalid JSON response from the OpenAI API. 
Trying to fix it now.") - print_to_console("Attempting to fix JSON by finding outermost brackets\n", Fore.RED, "") + logger.debug(title="Attempting to fix JSON by finding outermost brackets\n", title_color=Fore.RED) try: # Use regex to search for JSON objects @@ -80,7 +44,7 @@ def attempt_to_fix_json_by_finding_outermost_brackets(json_string): if json_match: # Extract the valid JSON object from the string json_string = json_match.group(0) - print_to_console("Apparently json was fixed.", Fore.GREEN,"") + logger.debug(title="Apparently json was fixed.", title_color=Fore.GREEN) if cfg.speak_mode and cfg.debug_mode: speak.say_text("Apparently json was fixed.") else: @@ -89,7 +53,7 @@ def attempt_to_fix_json_by_finding_outermost_brackets(json_string): except (json.JSONDecodeError, ValueError) as e: if cfg.speak_mode: speak.say_text("Didn't work. I will have to ignore this response then.") - print_to_console("Error: Invalid JSON, setting it to empty JSON now.\n", Fore.RED, "") + logger.error("Error: Invalid JSON, setting it to empty JSON now.\n") json_string = {} return json_string @@ -103,7 +67,7 @@ def print_assistant_thoughts(assistant_reply): # Parse and print Assistant response assistant_reply_json = fix_and_parse_json(assistant_reply) except json.JSONDecodeError as e: - print_to_console("Error: Invalid JSON in assistant thoughts\n", Fore.RED, assistant_reply) + logger.error("Error: Invalid JSON in assistant thoughts\n", assistant_reply) assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply) assistant_reply_json = fix_and_parse_json(assistant_reply_json) @@ -112,7 +76,7 @@ def print_assistant_thoughts(assistant_reply): try: assistant_reply_json = json.loads(assistant_reply_json) except json.JSONDecodeError as e: - print_to_console("Error: Invalid JSON in assistant thoughts\n", Fore.RED, assistant_reply) + logger.error("Error: Invalid JSON\n", assistant_reply) assistant_reply_json = 
attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply_json) assistant_thoughts_reasoning = None @@ -128,11 +92,11 @@ def print_assistant_thoughts(assistant_reply): assistant_thoughts_criticism = assistant_thoughts.get("criticism") assistant_thoughts_speak = assistant_thoughts.get("speak") - print_to_console(f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, assistant_thoughts_text) - print_to_console("REASONING:", Fore.YELLOW, assistant_thoughts_reasoning) + logger.log(f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, assistant_thoughts_text) + logger.log("REASONING:", Fore.YELLOW, assistant_thoughts_reasoning) if assistant_thoughts_plan: - print_to_console("PLAN:", Fore.YELLOW, "") + logger.log("PLAN:", Fore.YELLOW, "") # If it's a list, join it into a string if isinstance(assistant_thoughts_plan, list): assistant_thoughts_plan = "\n".join(assistant_thoughts_plan) @@ -143,23 +107,23 @@ def print_assistant_thoughts(assistant_reply): lines = assistant_thoughts_plan.split('\n') for line in lines: line = line.lstrip("- ") - print_to_console("- ", Fore.GREEN, line.strip()) + logger.log("- ", Fore.GREEN, line.strip()) - print_to_console("CRITICISM:", Fore.YELLOW, assistant_thoughts_criticism) + logger.log("CRITICISM:", Fore.YELLOW, assistant_thoughts_criticism) # Speak the assistant's thoughts if cfg.speak_mode and assistant_thoughts_speak: speak.say_text(assistant_thoughts_speak) - + return assistant_reply_json except json.decoder.JSONDecodeError as e: - print_to_console("Error: Invalid JSON\n", Fore.RED, assistant_reply) + logger.error("Error: Invalid JSON\n", assistant_reply) if cfg.speak_mode: speak.say_text("I have received an invalid JSON response from the OpenAI API. 
I cannot ignore this response.") # All other errors, return "Error: + error message" except Exception as e: call_stack = traceback.format_exc() - print_to_console("Error: \n", Fore.RED, call_stack) + logger.error("Error: \n", call_stack) def load_variables(config_file="config.yaml"): @@ -220,7 +184,7 @@ def construct_prompt(): """Construct the prompt for the AI to respond to""" config = AIConfig.load() if config.ai_name: - print_to_console( + logger.log( f"Welcome back! ", Fore.GREEN, f"Would you like me to return to being {config.ai_name}?", @@ -249,14 +213,14 @@ def prompt_user(): """Prompt the user for input""" ai_name = "" # Construct the prompt - print_to_console( + logger.log( "Welcome to Auto-GPT! ", Fore.GREEN, "Enter the name of your AI and its role below. Entering nothing will load defaults.", speak_text=True) # Get AI Name from User - print_to_console( + logger.log( "Name your AI: ", Fore.GREEN, "For example, 'Entrepreneur-GPT'") @@ -264,14 +228,14 @@ def prompt_user(): if ai_name == "": ai_name = "Entrepreneur-GPT" - print_to_console( + logger.log( f"{ai_name} here!", Fore.LIGHTBLUE_EX, "I am at your service.", speak_text=True) # Get AI Role from User - print_to_console( + logger.log( "Describe your AI's role: ", Fore.GREEN, "For example, 'an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth.'") @@ -280,7 +244,7 @@ def prompt_user(): ai_role = "an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth." 
# Enter up to 5 goals for the AI - print_to_console( + logger.log( "Enter up to 5 goals for your AI: ", Fore.GREEN, "For example: \nIncrease net worth, Grow Twitter Account, Develop and manage multiple businesses autonomously'") @@ -318,34 +282,33 @@ def parse_arguments(): cfg.set_debug_mode(True) if args.continuous: - print_to_console("Continuous Mode: ", Fore.RED, "ENABLED") - print_to_console( + logger.log("Continuous Mode: ", Fore.RED, "ENABLED") + logger.log( "WARNING: ", Fore.RED, "Continuous mode is not recommended. It is potentially dangerous and may cause your AI to run forever or carry out actions you would not usually authorise. Use at your own risk.") cfg.set_continuous_mode(True) if args.speak: - print_to_console("Speak Mode: ", Fore.GREEN, "ENABLED") + logger.log("Speak Mode: ", Fore.GREEN, "ENABLED") cfg.set_speak_mode(True) if args.gpt3only: - print_to_console("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED") + logger.log("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED") cfg.set_smart_llm_model(cfg.fast_llm_model) - + if args.gpt4only: - print_to_console("GPT4 Only Mode: ", Fore.GREEN, "ENABLED") + logger.log("GPT4 Only Mode: ", Fore.GREEN, "ENABLED") cfg.set_fast_llm_model(cfg.smart_llm_model) if args.debug: - print_to_console("Debug Mode: ", Fore.GREEN, "ENABLED") + logger.log("Debug Mode: ", Fore.GREEN, "ENABLED") cfg.set_debug_mode(True) # TODO: fill in llm values here check_openai_api_key() cfg = Config() -logger = configure_logging() parse_arguments() logger.set_level(logging.DEBUG if cfg.debug_mode else logging.INFO) ai_name = "" @@ -383,14 +346,14 @@ while True: if cfg.speak_mode: speak.say_text(f"I want to execute {command_name}") except Exception as e: - print_to_console("Error: \n", Fore.RED, str(e)) + logger.error("Error: \n", str(e)) if not cfg.continuous_mode and next_action_count == 0: ### GET USER AUTHORIZATION TO EXECUTE COMMAND ### # Get key press: Prompt the user to press enter to continue or escape # to exit user_input = "" - 
print_to_console( + logger.log( "NEXT ACTION: ", Fore.CYAN, f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}") @@ -419,7 +382,7 @@ while True: break if user_input == "GENERATE NEXT COMMAND JSON": - print_to_console( + logger.log( "-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=", Fore.MAGENTA, "") @@ -428,7 +391,7 @@ while True: break else: # Print command - print_to_console( + logger.log( "NEXT ACTION: ", Fore.CYAN, f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}") @@ -453,9 +416,9 @@ while True: # history if result is not None: full_message_history.append(chat.create_chat_message("system", result)) - print_to_console("SYSTEM: ", Fore.YELLOW, result) + logger.log("SYSTEM: ", Fore.YELLOW, result) else: full_message_history.append( chat.create_chat_message( "system", "Unable to execute command")) - print_to_console("SYSTEM: ", Fore.YELLOW, "Unable to execute command") + logger.log("SYSTEM: ", Fore.YELLOW, "Unable to execute command") diff --git a/scripts/token_counter.py b/scripts/token_counter.py index a28a9868e..635d32863 100644 --- a/scripts/token_counter.py +++ b/scripts/token_counter.py @@ -15,7 +15,7 @@ def count_message_tokens(messages : List[Dict[str, str]], model : str = "gpt-3.5 try: encoding = tiktoken.encoding_for_model(model) except KeyError: - print("Warning: model not found. Using cl100k_base encoding.") + logger.warn("Warning: model not found. Using cl100k_base encoding.") encoding = tiktoken.get_encoding("cl100k_base") if model == "gpt-3.5-turbo": # !Node: gpt-3.5-turbo may change over time. 
Returning num tokens assuming gpt-3.5-turbo-0301.") From 158c576a8758923c693fe3df3741a471f3f5807d Mon Sep 17 00:00:00 2001 From: Fabrice Hong Date: Wed, 12 Apr 2023 14:43:32 +0200 Subject: [PATCH 09/17] refactor(logger): rename the typewriter log function from 'log' to 'typewriter_log' --- scripts/logger.py | 10 +++++----- scripts/main.py | 46 +++++++++++++++++++++++----------------------- 2 files changed, 28 insertions(+), 28 deletions(-) diff --git a/scripts/logger.py b/scripts/logger.py index c6d995e0b..005210c4b 100644 --- a/scripts/logger.py +++ b/scripts/logger.py @@ -68,7 +68,7 @@ class Logger(metaclass=Singleton): self.logger.addHandler(error_handler) self.logger.setLevel(logging.DEBUG) - def log( + def typewriter_log( self, title='', title_color='', @@ -92,7 +92,7 @@ class Logger(metaclass=Singleton): title='', title_color='', ): - self._logs(title, title_color, message, logging.DEBUG) + self._log(title, title_color, message, logging.DEBUG) def warn( self, @@ -100,16 +100,16 @@ class Logger(metaclass=Singleton): title='', title_color='', ): - self._logs(title, title_color, message, logging.WARN) + self._log(title, title_color, message, logging.WARN) def error( self, title, message='' ): - self._logs(title, Fore.RED, message, logging.ERROR) + self._log(title, Fore.RED, message, logging.ERROR) - def _logs( + def _log( self, title='', title_color='', diff --git a/scripts/main.py b/scripts/main.py index 33f8b71c3..4160fe8a4 100644 --- a/scripts/main.py +++ b/scripts/main.py @@ -92,11 +92,11 @@ def print_assistant_thoughts(assistant_reply): assistant_thoughts_criticism = assistant_thoughts.get("criticism") assistant_thoughts_speak = assistant_thoughts.get("speak") - logger.log(f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, assistant_thoughts_text) - logger.log("REASONING:", Fore.YELLOW, assistant_thoughts_reasoning) + logger.typewriter_log(f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, assistant_thoughts_text) + logger.typewriter_log("REASONING:", Fore.YELLOW, 
assistant_thoughts_reasoning) if assistant_thoughts_plan: - logger.log("PLAN:", Fore.YELLOW, "") + logger.typewriter_log("PLAN:", Fore.YELLOW, "") # If it's a list, join it into a string if isinstance(assistant_thoughts_plan, list): assistant_thoughts_plan = "\n".join(assistant_thoughts_plan) @@ -107,9 +107,9 @@ def print_assistant_thoughts(assistant_reply): lines = assistant_thoughts_plan.split('\n') for line in lines: line = line.lstrip("- ") - logger.log("- ", Fore.GREEN, line.strip()) + logger.typewriter_log("- ", Fore.GREEN, line.strip()) - logger.log("CRITICISM:", Fore.YELLOW, assistant_thoughts_criticism) + logger.typewriter_log("CRITICISM:", Fore.YELLOW, assistant_thoughts_criticism) # Speak the assistant's thoughts if cfg.speak_mode and assistant_thoughts_speak: speak.say_text(assistant_thoughts_speak) @@ -184,7 +184,7 @@ def construct_prompt(): """Construct the prompt for the AI to respond to""" config = AIConfig.load() if config.ai_name: - logger.log( + logger.typewriter_log( f"Welcome back! ", Fore.GREEN, f"Would you like me to return to being {config.ai_name}?", @@ -213,14 +213,14 @@ def prompt_user(): """Prompt the user for input""" ai_name = "" # Construct the prompt - logger.log( + logger.typewriter_log( "Welcome to Auto-GPT! ", Fore.GREEN, "Enter the name of your AI and its role below. 
Entering nothing will load defaults.", speak_text=True) # Get AI Name from User - logger.log( + logger.typewriter_log( "Name your AI: ", Fore.GREEN, "For example, 'Entrepreneur-GPT'") @@ -228,14 +228,14 @@ def prompt_user(): if ai_name == "": ai_name = "Entrepreneur-GPT" - logger.log( + logger.typewriter_log( f"{ai_name} here!", Fore.LIGHTBLUE_EX, "I am at your service.", speak_text=True) # Get AI Role from User - logger.log( + logger.typewriter_log( "Describe your AI's role: ", Fore.GREEN, "For example, 'an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth.'") @@ -244,7 +244,7 @@ def prompt_user(): ai_role = "an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth." # Enter up to 5 goals for the AI - logger.log( + logger.typewriter_log( "Enter up to 5 goals for your AI: ", Fore.GREEN, "For example: \nIncrease net worth, Grow Twitter Account, Develop and manage multiple businesses autonomously'") @@ -278,31 +278,31 @@ def parse_arguments(): args = parser.parse_args() if args.debug: - logger.log("Debug Mode: ", Fore.GREEN, "ENABLED") + logger.typewriter_log("Debug Mode: ", Fore.GREEN, "ENABLED") cfg.set_debug_mode(True) if args.continuous: - logger.log("Continuous Mode: ", Fore.RED, "ENABLED") - logger.log( + logger.typewriter_log("Continuous Mode: ", Fore.RED, "ENABLED") + logger.typewriter_log( "WARNING: ", Fore.RED, "Continuous mode is not recommended. It is potentially dangerous and may cause your AI to run forever or carry out actions you would not usually authorise. 
Use at your own risk.") cfg.set_continuous_mode(True) if args.speak: - logger.log("Speak Mode: ", Fore.GREEN, "ENABLED") + logger.typewriter_log("Speak Mode: ", Fore.GREEN, "ENABLED") cfg.set_speak_mode(True) if args.gpt3only: - logger.log("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED") + logger.typewriter_log("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED") cfg.set_smart_llm_model(cfg.fast_llm_model) if args.gpt4only: - logger.log("GPT4 Only Mode: ", Fore.GREEN, "ENABLED") + logger.typewriter_log("GPT4 Only Mode: ", Fore.GREEN, "ENABLED") cfg.set_fast_llm_model(cfg.smart_llm_model) if args.debug: - logger.log("Debug Mode: ", Fore.GREEN, "ENABLED") + logger.typewriter_log("Debug Mode: ", Fore.GREEN, "ENABLED") cfg.set_debug_mode(True) @@ -353,7 +353,7 @@ while True: # Get key press: Prompt the user to press enter to continue or escape # to exit user_input = "" - logger.log( + logger.typewriter_log( "NEXT ACTION: ", Fore.CYAN, f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}") @@ -382,7 +382,7 @@ while True: break if user_input == "GENERATE NEXT COMMAND JSON": - logger.log( + logger.typewriter_log( "-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=", Fore.MAGENTA, "") @@ -391,7 +391,7 @@ while True: break else: # Print command - logger.log( + logger.typewriter_log( "NEXT ACTION: ", Fore.CYAN, f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}") @@ -416,9 +416,9 @@ while True: # history if result is not None: full_message_history.append(chat.create_chat_message("system", result)) - logger.log("SYSTEM: ", Fore.YELLOW, result) + logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result) else: full_message_history.append( chat.create_chat_message( "system", "Unable to execute command")) - logger.log("SYSTEM: ", Fore.YELLOW, "Unable to execute command") + logger.typewriter_log("SYSTEM: ", Fore.YELLOW, "Unable to execute command") From 
0f3c85335c5d56919f9559fccdf25807d2181376 Mon Sep 17 00:00:00 2001 From: Fabrice Hong Date: Wed, 12 Apr 2023 19:27:49 +0200 Subject: [PATCH 10/17] fix(logger): fix file handler not outputting DEBUG --- scripts/logger.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/logger.py b/scripts/logger.py index 005210c4b..a609e6027 100644 --- a/scripts/logger.py +++ b/scripts/logger.py @@ -45,7 +45,7 @@ class Logger(metaclass=Singleton): # Info handler in activity.log self.file_handler = logging.FileHandler(os.path.join(log_dir, log_file)) - self.file_handler.setLevel(logging.INFO) + self.file_handler.setLevel(logging.DEBUG) info_formatter = AutoGptFormatter('%(asctime)s %(levelname)s %(title)s %(message_no_color)s') self.file_handler.setFormatter(info_formatter) From 9f8b9db7bb09020ff9cba7e48e9ce71502f32dbe Mon Sep 17 00:00:00 2001 From: Fabrice Hong Date: Wed, 12 Apr 2023 20:08:47 +0200 Subject: [PATCH 11/17] fix(global): fix wrong calls --- scripts/main.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/main.py b/scripts/main.py index 4160fe8a4..d7b94085c 100644 --- a/scripts/main.py +++ b/scripts/main.py @@ -33,7 +33,7 @@ def check_openai_api_key(): def attempt_to_fix_json_by_finding_outermost_brackets(json_string): if cfg.speak_mode and cfg.debug_mode: speak.say_text("I have received an invalid JSON response from the OpenAI API. 
Trying to fix it now.") - logger.debug(title="Attempting to fix JSON by finding outermost brackets\n", title_color=Fore.RED) + logger.typewriter_log("Attempting to fix JSON by finding outermost brackets\n") try: # Use regex to search for JSON objects @@ -44,7 +44,7 @@ def attempt_to_fix_json_by_finding_outermost_brackets(json_string): if json_match: # Extract the valid JSON object from the string json_string = json_match.group(0) - logger.debug(title="Apparently json was fixed.", title_color=Fore.GREEN) + logger.typewriter_log(title="Apparently json was fixed.", title_color=Fore.GREEN) if cfg.speak_mode and cfg.debug_mode: speak.say_text("Apparently json was fixed.") else: From fa5b71c022487896d3b07a1574d1faf6ebbeca19 Mon Sep 17 00:00:00 2001 From: Drikus Roor Date: Tue, 11 Apr 2023 20:23:41 +0200 Subject: [PATCH 12/17] docs: Update README.md about running tests and coverage --- README.md | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index dc3386f58..8862ade7c 100644 --- a/README.md +++ b/README.md @@ -96,9 +96,10 @@ pip install -r requirements.txt ``` 4. Rename `.env.template` to `.env` and fill in your `OPENAI_API_KEY`. If you plan to use Speech Mode, fill in your `ELEVEN_LABS_API_KEY` as well. - - Obtain your OpenAI API key from: https://platform.openai.com/account/api-keys. - - Obtain your ElevenLabs API key from: https://elevenlabs.io. You can view your xi-api-key using the "Profile" tab on the website. - - If you want to use GPT on an Azure instance, set `USE_AZURE` to `True` and provide the `OPENAI_AZURE_API_BASE`, `OPENAI_AZURE_API_VERSION` and `OPENAI_AZURE_DEPLOYMENT_ID` values as explained here: https://pypi.org/project/openai/ in the `Microsoft Azure Endpoints` section. Additionally you need separate deployments for both embeddings and chat. 
Add their ID values to `OPENAI_AZURE_CHAT_DEPLOYMENT_ID` and `OPENAI_AZURE_EMBEDDINGS_DEPLOYMENT_ID` respectively + +- Obtain your OpenAI API key from: https://platform.openai.com/account/api-keys. +- Obtain your ElevenLabs API key from: https://elevenlabs.io. You can view your xi-api-key using the "Profile" tab on the website. +- If you want to use GPT on an Azure instance, set `USE_AZURE` to `True` and provide the `OPENAI_AZURE_API_BASE`, `OPENAI_AZURE_API_VERSION` and `OPENAI_AZURE_DEPLOYMENT_ID` values as explained here: https://pypi.org/project/openai/ in the `Microsoft Azure Endpoints` section. Additionally you need separate deployments for both embeddings and chat. Add their ID values to `OPENAI_AZURE_CHAT_DEPLOYMENT_ID` and `OPENAI_AZURE_EMBEDDINGS_DEPLOYMENT_ID` respectively ## 🔧 Usage @@ -113,9 +114,11 @@ python scripts/main.py 3. To exit the program, type "exit" and press Enter. ### Logs -You will find activity and error logs in the folder ```./logs``` + +You will find activity and error logs in the folder `./logs` To output debug logs: + ``` python scripts/main.py --debug ``` From dc0a94bba36549411b108763cd11ba82e94fbf7e Mon Sep 17 00:00:00 2001 From: Drikus Roor Date: Wed, 12 Apr 2023 09:02:05 +0200 Subject: [PATCH 13/17] ci: Add a flake8 linting job --- .github/workflows/unit_tests.yml | 3 +++ requirements.txt | 3 ++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/.github/workflows/unit_tests.yml b/.github/workflows/unit_tests.yml index 5973dd029..dda45e6c1 100644 --- a/.github/workflows/unit_tests.yml +++ b/.github/workflows/unit_tests.yml @@ -30,6 +30,9 @@ jobs: python -m pip install --upgrade pip pip install -r requirements.txt + - name: Lint with flake8 + run: flake8 scripts/ tests/ + - name: Run unittest tests with coverage run: | coverage run --source=scripts -m unittest discover tests diff --git a/requirements.txt b/requirements.txt index b196c3d78..b864c1d3e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -15,4 +15,5 @@ 
pinecone-client==2.2.1 redis orjson Pillow -coverage \ No newline at end of file +coverage +flake8 \ No newline at end of file From 87d465a8f1e2cd85772dfdbefe2b0aabf47d7f0d Mon Sep 17 00:00:00 2001 From: Drikus Roor Date: Wed, 12 Apr 2023 09:02:28 +0200 Subject: [PATCH 14/17] chore: Rename unit test workflow file to ci.yml --- .github/workflows/{unit_tests.yml => ci.yml} | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) rename .github/workflows/{unit_tests.yml => ci.yml} (98%) diff --git a/.github/workflows/unit_tests.yml b/.github/workflows/ci.yml similarity index 98% rename from .github/workflows/unit_tests.yml rename to .github/workflows/ci.yml index dda45e6c1..a06e5ff9c 100644 --- a/.github/workflows/unit_tests.yml +++ b/.github/workflows/ci.yml @@ -1,4 +1,4 @@ -name: Unit Tests +name: Python CI on: push: From 76cc0d2d743c99a5b954ca10dd30a6381877b758 Mon Sep 17 00:00:00 2001 From: Drikus Roor Date: Wed, 12 Apr 2023 09:03:57 +0200 Subject: [PATCH 15/17] docs: Document flake8 linter --- README.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/README.md b/README.md index 8862ade7c..fe49b247b 100644 --- a/README.md +++ b/README.md @@ -323,3 +323,11 @@ To run tests and see coverage, run the following command: ``` coverage run -m unittest discover tests ``` + +## Run linter + +This project uses [flake8](https://flake8.pycqa.org/en/latest/) for linting. 
To run the linter, run the following command: + +``` +flake8 scripts/ tests/ +``` \ No newline at end of file From 29d6ecd4d30510c6887bb21f0a21f355de47707f Mon Sep 17 00:00:00 2001 From: Drikus Roor Date: Wed, 12 Apr 2023 09:07:03 +0200 Subject: [PATCH 16/17] ci: Allow flake8 failure since there are a lot of issues --- .github/workflows/ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a06e5ff9c..3d3628bc8 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -31,6 +31,7 @@ jobs: pip install -r requirements.txt - name: Lint with flake8 + continue-on-error: true run: flake8 scripts/ tests/ - name: Run unittest tests with coverage From d780988554918d4ad0b4b4e8187bf34df1af5868 Mon Sep 17 00:00:00 2001 From: Drikus Roor Date: Wed, 12 Apr 2023 16:02:18 +0200 Subject: [PATCH 17/17] chore: Add new lines to end of files --- .gitignore | 2 +- main.py | 2 +- tests/context.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.gitignore b/.gitignore index aa0dceaa9..cfa3b08b5 100644 --- a/.gitignore +++ b/.gitignore @@ -18,4 +18,4 @@ log.txt # Coverage reports .coverage coverage.xml -htmlcov/ \ No newline at end of file +htmlcov/ diff --git a/main.py b/main.py index 5f044237e..656c34ecb 100644 --- a/main.py +++ b/main.py @@ -1 +1 @@ -from scripts.main import main \ No newline at end of file +from scripts.main import main diff --git a/tests/context.py b/tests/context.py index 2adb9dd6e..b668c8dc2 100644 --- a/tests/context.py +++ b/tests/context.py @@ -2,4 +2,4 @@ import sys import os sys.path.insert(0, os.path.abspath( - os.path.join(os.path.dirname(__file__), '../scripts'))) \ No newline at end of file + os.path.join(os.path.dirname(__file__), '../scripts')))