From a5048aed56eb1683742aa758b65ad3bd1339ce95 Mon Sep 17 00:00:00 2001 From: ryanmac Date: Mon, 3 Apr 2023 14:20:45 -0500 Subject: [PATCH 01/69] Fix .gitignore to include the correct path to auto_gpt_workspace --- .gitignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 6b8f00b51..55a21afeb 100644 --- a/.gitignore +++ b/.gitignore @@ -4,7 +4,7 @@ scripts/node_modules/ scripts/__pycache__/keys.cpython-310.pyc package-lock.json *.pyc -scripts/auto_gpt_workspace/* +auto_gpt_workspace/* *.mpeg .env last_run_ai_settings.yaml From ae60025f7c2d339e7a1463546b06fd448df26016 Mon Sep 17 00:00:00 2001 From: Toran Bruce Richards Date: Tue, 4 Apr 2023 12:20:59 +0100 Subject: [PATCH 02/69] =?UTF-8?q?Updates=20Sponsors=20List=20=F0=9F=92=96?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- README.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 5848c585d..d66a60222 100644 --- a/README.md +++ b/README.md @@ -20,10 +20,11 @@ Your support is greatly appreciated Development of this free, open-source project is made possible by all the contributors and sponsors. If you'd like to sponsor this project and have your avatar or company logo appear below click here. πŸ’–

-nocodeclarity  tjarmain  tekelsey  robinicus  digisomni   +thepok  SpacingLily  m  zkonduit  maxxflyer  tekelsey  nocodeclarity  tjarmain  alexisneuhaus  jaumebalust  robinicus  digisomni  

+

-alexisneuhaus  iokode  jaumebalust   +alexisneuhaus  iokode  jaumebalust  nova-land  robinicus  Void-n-Null  ritesh24  merwanehamadi  raulmarindev  siduppal  goosecubedaddy  pleabargain  

From 8150f0c813962aaec6b0c070b27c5201e858ef8b Mon Sep 17 00:00:00 2001 From: Pavloh <89773173+ImPavloh@users.noreply.github.com> Date: Tue, 4 Apr 2023 13:24:14 +0200 Subject: [PATCH 03/69] Refactor load_prompt function Simplify the load_prompt function by eliminating redundancy --- scripts/data.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/scripts/data.py b/scripts/data.py index 72a6bbfc6..8d8a7b4ac 100644 --- a/scripts/data.py +++ b/scripts/data.py @@ -1,15 +1,14 @@ import os from pathlib import Path -SRC_DIR = Path(__file__).parent def load_prompt(): try: # get directory of this file: - file_dir = Path(os.path.dirname(os.path.realpath(__file__))) - data_dir = file_dir / "data" - prompt_file = data_dir / "prompt.txt" - # Load the promt from data/prompt.txt - with open(SRC_DIR/ "data/prompt.txt", "r") as prompt_file: + file_dir = Path(__file__).parent + prompt_file_path = file_dir / "data" / "prompt.txt" + + # Load the prompt from data/prompt.txt + with open(prompt_file_path, "r") as prompt_file: prompt = prompt_file.read() return prompt From e1d3cc94921efab79db2608e3ff62aae39493228 Mon Sep 17 00:00:00 2001 From: Toran Bruce Richards Date: Tue, 4 Apr 2023 13:09:33 +0100 Subject: [PATCH 04/69] Fixes: TypeError: eleven_labs_speech() missing 1 required positional argument: 'text' --- scripts/speak.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/speak.py b/scripts/speak.py index 6fef41f83..f6242a37d 100644 --- a/scripts/speak.py +++ b/scripts/speak.py @@ -42,7 +42,7 @@ def say_text(text, voice_index=0): if not cfg.elevenlabs_api_key: gtts_speech(text) else: - success = eleven_labs_speech() + success = eleven_labs_speech(text) if not success: gtts_speech(text) From 624e5b8a181dc276c7856b5d545abe2a36a59759 Mon Sep 17 00:00:00 2001 From: thepok Date: Tue, 4 Apr 2023 15:21:46 +0200 Subject: [PATCH 05/69] More logical Prompt - found by Agusx1211 --- scripts/data/prompt.txt | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/scripts/data/prompt.txt b/scripts/data/prompt.txt index a93e783eb..64a4fdba9 100644 --- a/scripts/data/prompt.txt +++ b/scripts/data/prompt.txt @@ -43,12 +43,6 @@ You should only respond in JSON format as described below RESPONSE FORMAT: { - "command": { - "name": "command name", - "args":{ - "arg name": "value" - } - }, "thoughts": { "text": "thought", @@ -56,6 +50,12 @@ RESPONSE FORMAT: "plan": "- short bulleted\n- list that conveys\n- long-term plan", "criticism": "constructive self-criticism", "speak": "thoughts summary to say to user" + }, + "command": { + "name": "command name", + "args":{ + "arg name": "value" + } } } From ed5952782fbdf388f335768d12a958088c611d68 Mon Sep 17 00:00:00 2001 From: slavakurilyak Date: Tue, 4 Apr 2023 15:53:59 -0500 Subject: [PATCH 06/69] Improve key validation and handling in overwrite_memory and message_agent functions --- scripts/commands.py | 38 ++++++++++++++++++++++++++++++++------ 1 file changed, 32 insertions(+), 6 deletions(-) diff --git a/scripts/commands.py b/scripts/commands.py index f8f96fe72..c1d5db9d3 100644 --- a/scripts/commands.py +++ b/scripts/commands.py @@ -16,6 +16,13 @@ from googleapiclient.errors import HttpError cfg = Config() +def is_valid_int(value): + try: + int(value) + return True + except ValueError: + return False + def get_command(response): try: response_json = fix_and_parse_json(response) @@ -194,14 +201,28 @@ def delete_memory(key): def overwrite_memory(key, string): - if int(key) >= 0 and key < 
len(mem.permanent_memory): - _text = "Overwriting memory with key " + \ - str(key) + " and string " + string - mem.permanent_memory[key] = string + # Check if the key is a valid integer + if is_valid_int(key): + key_int = int(key) + # Check if the integer key is within the range of the permanent_memory list + if 0 <= key_int < len(mem.permanent_memory): + _text = "Overwriting memory with key " + str(key) + " and string " + string + # Overwrite the memory slot with the given integer key and string + mem.permanent_memory[key_int] = string + print(_text) + return _text + else: + print(f"Invalid key '{key}', out of range.") + return None + # Check if the key is a valid string + elif isinstance(key, str): + _text = "Overwriting memory with key " + key + " and string " + string + # Overwrite the memory slot with the given string key and string + mem.string_key_memory[key] = string print(_text) return _text else: - print("Invalid key, cannot overwrite memory.") + print(f"Invalid key '{key}', must be an integer or a string.") return None @@ -235,7 +256,12 @@ def start_agent(name, task, prompt, model=cfg.fast_llm_model): def message_agent(key, message): global cfg - agent_response = agents.message_agent(key, message) + + # Check if the key is a valid integer + if not is_valid_int(key): + return "Invalid key, cannot message agent." + + agent_response = agents.message_agent(int(key), message) # Speak response if cfg.speak_mode: From 1e4732807931f815feb3f6dfab22b5f7c4d0ce15 Mon Sep 17 00:00:00 2001 From: slavakurilyak Date: Tue, 4 Apr 2023 20:32:15 -0500 Subject: [PATCH 07/69] Add search files command --- scripts/commands.py | 4 +++- scripts/data/prompt.txt | 11 ++++++----- scripts/file_operations.py | 13 +++++++++++++ 3 files changed, 22 insertions(+), 6 deletions(-) diff --git a/scripts/commands.py b/scripts/commands.py index c1d5db9d3..a6e8f7451 100644 --- a/scripts/commands.py +++ b/scripts/commands.py @@ -6,7 +6,7 @@ import agent_manager as agents import speak from config import Config import ai_functions as ai -from file_operations import read_file, write_to_file, append_to_file, delete_file +from file_operations import read_file, write_to_file, append_to_file, delete_file, search_files from execute_code import execute_python_file from json_parser import fix_and_parse_json from duckduckgo_search import ddg @@ -90,6 +90,8 @@ def execute_command(command_name, arguments): return append_to_file(arguments["file"], arguments["text"]) elif command_name == "delete_file": return delete_file(arguments["file"]) + elif command_name == "search_files": + return search_files(arguments["directory"]) elif command_name == "browse_website": return browse_website(arguments["url"], arguments["question"]) # TODO: Change these to take in a file rather than pasted code, if diff --git a/scripts/data/prompt.txt b/scripts/data/prompt.txt index a93e783eb..75d9312a4 100644 --- a/scripts/data/prompt.txt +++ b/scripts/data/prompt.txt @@ -19,11 +19,12 @@ COMMANDS: 11. Read file: "read_file", args: "file": "" 12. Append to file: "append_to_file", args: "file": "", "text": "" 13. Delete file: "delete_file", args: "file": "" -14. Evaluate Code: "evaluate_code", args: "code": "" -15. Get Improved Code: "improve_code", args: "suggestions": "", "code": "" -16. Write Tests: "write_tests", args: "code": "", "focus": "" -17. Execute Python File: "execute_python_file", args: "file": "" -18. Task Complete (Shutdown): "task_complete", args: "reason": "" +14. Search Files: "search_files", args: "directory": "" +15. 
Evaluate Code: "evaluate_code", args: "code": "" +16. Get Improved Code: "improve_code", args: "suggestions": "", "code": "" +17. Write Tests: "write_tests", args: "code": "", "focus": "" +18. Execute Python File: "execute_python_file", args: "file": "" +19. Task Complete (Shutdown): "task_complete", args: "reason": "" RESOURCES: diff --git a/scripts/file_operations.py b/scripts/file_operations.py index 81ad47157..f3cd3a458 100644 --- a/scripts/file_operations.py +++ b/scripts/file_operations.py @@ -58,3 +58,16 @@ def delete_file(filename): return "File deleted successfully." except Exception as e: return "Error: " + str(e) + +def search_files(directory): + found_files = [] + search_directory = safe_join(working_directory, directory) + + for root, _, files in os.walk(search_directory): + for file in files: + if file.startswith('.'): + continue + relative_path = os.path.relpath(os.path.join(root, file), working_directory) + found_files.append(relative_path) + + return found_files \ No newline at end of file From b418861d70399eb021c2bec71ff1b7bc7780155f Mon Sep 17 00:00:00 2001 From: slavakurilyak Date: Tue, 4 Apr 2023 20:53:41 -0500 Subject: [PATCH 08/69] Update message_agent function to support string keys --- scripts/commands.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/scripts/commands.py b/scripts/commands.py index c1d5db9d3..2adb84cf5 100644 --- a/scripts/commands.py +++ b/scripts/commands.py @@ -256,18 +256,20 @@ def start_agent(name, task, prompt, model=cfg.fast_llm_model): def message_agent(key, message): global cfg - + # Check if the key is a valid integer - if not is_valid_int(key): - return "Invalid key, cannot message agent." - - agent_response = agents.message_agent(int(key), message) + if is_valid_int(key): + agent_response = agents.message_agent(int(key), message) + # Check if the key is a valid string + elif isinstance(key, str): + agent_response = agents.message_agent(key, message) + else: + return "Invalid key, must be an integer or a string." # Speak response if cfg.speak_mode: - speak.say_text(agent_response, 1) - - return f"Agent {key} responded: {agent_response}" + say.speak(agent_response) + return agent_response def list_agents(): From 98a2b4d9a5453336fc718b3a4ec8fbcc3207251e Mon Sep 17 00:00:00 2001 From: ReadyG <42131870+ReadyG@users.noreply.github.com> Date: Wed, 5 Apr 2023 00:11:19 -0400 Subject: [PATCH 09/69] Update prompt.txt Added double quotes around on line 14. --- scripts/data/prompt.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/data/prompt.txt b/scripts/data/prompt.txt index a93e783eb..ba9a3ec62 100644 --- a/scripts/data/prompt.txt +++ b/scripts/data/prompt.txt @@ -11,7 +11,7 @@ COMMANDS: 3. Memory Delete: "memory_del", args: "key": "" 4. Memory Overwrite: "memory_ovr", args: "key": "", "string": "" 5. Browse Website: "browse_website", args: "url": "", "question": "" -6. Start GPT Agent: "start_agent", args: "name": , "task": "", "prompt": "" +6. Start GPT Agent: "start_agent", args: "name": "", "task": "", "prompt": "" 7. Message GPT Agent: "message_agent", args: "key": "", "message": "" 8. List GPT Agents: "list_agents", args: "" 9. 
Delete GPT Agent: "delete_agent", args: "key": "" From c1be8a74038bf47aecd77df04b1b08716ab63e82 Mon Sep 17 00:00:00 2001 From: russellocean Date: Wed, 5 Apr 2023 00:56:15 -0400 Subject: [PATCH 10/69] Implement custom continuous task count with 'y -n' --- scripts/main.py | 61 +++++++++++++++++++++++++++++-------------------- 1 file changed, 36 insertions(+), 25 deletions(-) diff --git a/scripts/main.py b/scripts/main.py index 93124234e..492df20b0 100644 --- a/scripts/main.py +++ b/scripts/main.py @@ -277,6 +277,7 @@ prompt = construct_prompt() # Initialize variables full_message_history = [] result = None +next_action_count = 0 # Make a constant: user_input = "Determine which next command to use, and respond using the format specified above:" @@ -291,7 +292,6 @@ while True: mem.permanent_memory, cfg.fast_token_limit) # TODO: This hardcodes the model to use GPT3.5. Make this an argument - # print("assistant reply: "+assistant_reply) # Print Assistant thoughts print_assistant_thoughts(assistant_reply) @@ -301,36 +301,45 @@ while True: except Exception as e: print_to_console("Error: \n", Fore.RED, str(e)) - if not cfg.continuous_mode: + if not cfg.continuous_mode or next_action_count > 0: ### GET USER AUTHORIZATION TO EXECUTE COMMAND ### # Get key press: Prompt the user to press enter to continue or escape # to exit - user_input = "" - print_to_console( - "NEXT ACTION: ", - Fore.CYAN, - f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}") - print( - f"Enter 'y' to authorise command or 'n' to exit program, or enter feedback for {ai_name}...", - flush=True) - while True: - console_input = input(Fore.MAGENTA + "Input:" + Style.RESET_ALL) - if console_input.lower() == "y": - user_input = "GENERATE NEXT COMMAND JSON" - break - elif console_input.lower() == "n": - user_input = "EXIT" - break - else: - user_input = console_input - command_name = "human_feedback" - break + if next_action_count == 0: + user_input = "" + print_to_console( + "NEXT ACTION: ", + Fore.CYAN, + f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}") + print( + f"Enter 'y' to authorise command, 'y -n' to run n continuous commands, 'n' to exit program, or enter feedback for {ai_name}...", + flush=True) + while True: + console_input = input(Fore.MAGENTA + "Input:" + Style.RESET_ALL) + if console_input.lower() == "y": + user_input = "GENERATE NEXT COMMAND JSON" + break + elif console_input.lower().startswith("y -"): + try: + next_action_count = abs(int(console_input.split(" ")[1])) + user_input = "GENERATE NEXT COMMAND JSON" + except ValueError: + print("Invalid input format. 
Please enter 'y -n' where n is the number of continuous tasks.") + continue + break + elif console_input.lower() == "n": + user_input = "EXIT" + break + else: + user_input = console_input + command_name = "human_feedback" + break if user_input == "GENERATE NEXT COMMAND JSON": print_to_console( - "-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=", - Fore.MAGENTA, - "") + "-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=", + Fore.MAGENTA, + "") elif user_input == "EXIT": print("Exiting...", flush=True) break @@ -348,6 +357,8 @@ while True: result = f"Human feedback: {user_input}" else: result = f"Command {command_name} returned: {cmd.execute_command(command_name, arguments)}" + if next_action_count > 0: + next_action_count -= 1 # Check if there's a result from the command append it to the message # history From c8c8f5b11e4f6c1d7297434996dd5159cb313d59 Mon Sep 17 00:00:00 2001 From: russellocean Date: Wed, 5 Apr 2023 10:19:56 -0400 Subject: [PATCH 11/69] Apply suggestions from code review MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Thanks to @Gerqus Co-authored-by: PaweΕ‚ Pieniacki --- scripts/main.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/main.py b/scripts/main.py index 492df20b0..f7692757d 100644 --- a/scripts/main.py +++ b/scripts/main.py @@ -301,7 +301,7 @@ while True: except Exception as e: print_to_console("Error: \n", Fore.RED, str(e)) - if not cfg.continuous_mode or next_action_count > 0: + if not cfg.continuous_mode and next_action_count === 0: ### GET USER AUTHORIZATION TO EXECUTE COMMAND ### # Get key press: Prompt the user to press enter to continue or escape # to exit @@ -312,7 +312,7 @@ while True: Fore.CYAN, f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}") print( - f"Enter 'y' to authorise command, 'y -n' to run n continuous commands, 'n' to exit program, or enter feedback for {ai_name}...", + f"Enter 'y' to authorise command, 'y -N' to run N continuous commands, 'n' to exit program, or enter feedback for {ai_name}...", flush=True) while True: console_input = input(Fore.MAGENTA + "Input:" + Style.RESET_ALL) From c8a927d3ec35cc01feadad470eacb617902b6987 Mon Sep 17 00:00:00 2001 From: russellocean Date: Wed, 5 Apr 2023 10:28:50 -0400 Subject: [PATCH 12/69] Syntax Error, Revert formatting errors, remove 308 --- scripts/main.py | 63 ++++++++++++++++++++++++------------------------- 1 file changed, 31 insertions(+), 32 deletions(-) diff --git a/scripts/main.py b/scripts/main.py index f7692757d..7f1f24b1e 100644 --- a/scripts/main.py +++ b/scripts/main.py @@ -301,45 +301,44 @@ while True: except Exception as e: print_to_console("Error: \n", Fore.RED, str(e)) - if not cfg.continuous_mode and next_action_count === 0: + if not cfg.continuous_mode and next_action_count == 0: ### GET USER AUTHORIZATION TO EXECUTE COMMAND ### # Get key press: Prompt the user to press enter to continue or escape # to exit - if next_action_count == 0: - user_input = "" - print_to_console( - "NEXT ACTION: ", - Fore.CYAN, - f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}") - print( - f"Enter 'y' to authorise command, 'y -N' to run N continuous commands, 'n' to exit program, or enter feedback for {ai_name}...", - flush=True) - while True: - console_input = input(Fore.MAGENTA + "Input:" + Style.RESET_ALL) - if console_input.lower() == "y": + user_input = "" + print_to_console( + "NEXT ACTION: ", 
+ Fore.CYAN, + f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}") + print( + f"Enter 'y' to authorise command, 'y -N' to run N continuous commands, 'n' to exit program, or enter feedback for {ai_name}...", + flush=True) + while True: + console_input = input(Fore.MAGENTA + "Input:" + Style.RESET_ALL) + if console_input.lower() == "y": + user_input = "GENERATE NEXT COMMAND JSON" + break + elif console_input.lower().startswith("y -"): + try: + next_action_count = abs(int(console_input.split(" ")[1])) user_input = "GENERATE NEXT COMMAND JSON" - break - elif console_input.lower().startswith("y -"): - try: - next_action_count = abs(int(console_input.split(" ")[1])) - user_input = "GENERATE NEXT COMMAND JSON" - except ValueError: - print("Invalid input format. Please enter 'y -n' where n is the number of continuous tasks.") - continue - break - elif console_input.lower() == "n": - user_input = "EXIT" - break - else: - user_input = console_input - command_name = "human_feedback" - break + except ValueError: + print("Invalid input format. Please enter 'y -n' where n is the number of continuous tasks.") + continue + break + elif console_input.lower() == "n": + user_input = "EXIT" + break + else: + user_input = console_input + command_name = "human_feedback" + break if user_input == "GENERATE NEXT COMMAND JSON": print_to_console( - "-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=", - Fore.MAGENTA, - "") + "-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=", + Fore.MAGENTA, + "") elif user_input == "EXIT": print("Exiting...", flush=True) break From b0cd26ac240c884ed817cbec0f8d0aa79b56bf61 Mon Sep 17 00:00:00 2001 From: Mano Bharathi M <88357044+ManoBharathi93@users.noreply.github.com> Date: Wed, 5 Apr 2023 21:30:30 +0530 Subject: [PATCH 13/69] Added Contributing.md file --- CONTRIBUTING.md | 55 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) create mode 100644 CONTRIBUTING.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 000000000..73d38d67c --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,55 @@ + +To contribute to this GitHub project, you can follow these steps: + +1. Fork the repository you want to contribute to by clicking the "Fork" button on the project page. + +2. Clone the repository to your local machine using the following command: + +``` +git clone https://github.com/Torantulino/Auto-GPT +``` +3. Create a new branch for your changes using the following command: + +``` +git checkout -b "branch-name" +``` +4. Make your changes to the code or documentation. +- Example: Improve User Interface or Add Documentation. + +5. Add the changes to the staging area using the following command: +``` +git add . +``` + +6. Commit the changes with a meaningful commit message using the following command: +``` +git commit -m "your commit message" +``` +7. Push the changes to your forked repository using the following command: +``` +git push origin branch-name +``` +8. Go to the GitHub website and navigate to your forked repository. + +9. Click the "New pull request" button. + +10. Select the branch you just pushed to and the branch you want to merge into on the original repository. + +11. Add a description of your changes and click the "Create pull request" button. + +12. Wait for the project maintainer to review your changes and provide feedback. + +13. 
Make any necessary changes based on feedback and repeat steps 5-12 until your changes are accepted and merged into the main project. + +14. Once your changes are merged, you can update your forked repository and local copy of the repository with the following commands: + +``` +git fetch upstream +git checkout master +git merge upstream/master +``` +Finally, delete the branch you created with the following command: +``` +git branch -d branch-name +``` +That's it you made it 🐣⭐⭐ From 29a45de2531f014f44501bf815f86f7cd8d7cff9 Mon Sep 17 00:00:00 2001 From: Fabrice Hong Date: Wed, 5 Apr 2023 19:08:53 +0200 Subject: [PATCH 14/69] fix(json-jixer): enclose arguments in a python multi-line string so the fix_json assistant (GPT-3.5) can understand we are providing 2 strings arguments instead of one --- scripts/json_parser.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/json_parser.py b/scripts/json_parser.py index 11ff9ed22..cb05796d4 100644 --- a/scripts/json_parser.py +++ b/scripts/json_parser.py @@ -51,7 +51,7 @@ def fix_and_parse_json(json_str: str, try_to_fix_with_gpt: bool = True): def fix_json(json_str: str, schema: str, debug=False) -> str: # Try to fix the JSON using gpt: function_string = "def fix_json(json_str: str, schema:str=None) -> str:" - args = [json_str, schema] + args = [f"'''{json_str}'''", f"'''{schema}'''"] description_string = """Fixes the provided JSON string to make it parseable and fully complient with the provided schema.\n If an object or field specifed in the schema isn't contained within the correct JSON, it is ommited.\n This function is brilliant at guessing when the format is incorrect.""" # If it doesn't already start with a "`", add one: From a868a39b88a89483bca556f6026e56b3d9c9d991 Mon Sep 17 00:00:00 2001 From: Peter Edwards Date: Wed, 5 Apr 2023 19:44:28 +0200 Subject: [PATCH 15/69] Added functionality to allow the use of GPT on a Microsoft Azure instance --- .env.template | 6 +++++- README.md | 1 + scripts/config.py | 10 ++++++++++ scripts/llm_utils.py | 21 +++++++++++++++------ 4 files changed, 31 insertions(+), 7 deletions(-) diff --git a/.env.template b/.env.template index c64d85028..b8e896639 100644 --- a/.env.template +++ b/.env.template @@ -3,4 +3,8 @@ ELEVENLABS_API_KEY=your-elevenlabs-api-key SMART_LLM_MODEL="gpt-4" FAST_LLM_MODEL="gpt-3.5-turbo" GOOGLE_API_KEY= -CUSTOM_SEARCH_ENGINE_ID= \ No newline at end of file +CUSTOM_SEARCH_ENGINE_ID= +USE_AZURE=False +OPENAI_API_BASE=your-base-url-for-azure +OPENAI_API_VERSION=api-version-for-azure +OPENAI_DEPLOYMENT_ID=deployment-id-for-azure \ No newline at end of file diff --git a/README.md b/README.md index d66a60222..6acb0f6c5 100644 --- a/README.md +++ b/README.md @@ -92,6 +92,7 @@ pip install -r requirements.txt 4. Rename `.env.template` to `.env` and fill in your `OPENAI_API_KEY`. If you plan to use Speech Mode, fill in your `ELEVEN_LABS_API_KEY` as well. - Obtain your OpenAI API key from: https://platform.openai.com/account/api-keys. - Obtain your ElevenLabs API key from: https://elevenlabs.io. You can view your xi-api-key using the "Profile" tab on the website. 
+ - If you want to use GPT on an Azure instance, set `USE_AZURE` to `True` and provide the `OPENAI_API_BASE`, `OPENAI_API_VERSION` and `OPENAI_DEPLOYMENT_ID` values as explained here: https://pypi.org/project/openai/ in the `Microsoft Azure Endpoints` section ## πŸ”§ Usage diff --git a/scripts/config.py b/scripts/config.py index 766cb94f4..d97ded9ca 100644 --- a/scripts/config.py +++ b/scripts/config.py @@ -34,6 +34,16 @@ class Config(metaclass=Singleton): self.smart_token_limit = int(os.getenv("SMART_TOKEN_LIMIT", 8000)) self.openai_api_key = os.getenv("OPENAI_API_KEY") + self.use_azure = False + self.use_azure = os.getenv("USE_AZURE") + if self.use_azure: + self.openai_api_base = os.getenv("OPENAI_API_BASE") + self.openai_api_version = os.getenv("OPENAI_API_VERSION") + self.openai_deployment_id = os.getenv("OPENAI_DEPLOYMENT_ID") + openai.api_type = "azure" + openai.api_base = self.openai_api_base + openai.api_version = self.openai_api_version + self.elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY") self.google_api_key = os.getenv("GOOGLE_API_KEY") diff --git a/scripts/llm_utils.py b/scripts/llm_utils.py index 41f396250..5a471ab7a 100644 --- a/scripts/llm_utils.py +++ b/scripts/llm_utils.py @@ -6,11 +6,20 @@ openai.api_key = cfg.openai_api_key # Overly simple abstraction until we create something better def create_chat_completion(messages, model=None, temperature=None, max_tokens=None)->str: - response = openai.ChatCompletion.create( - model=model, - messages=messages, - temperature=temperature, - max_tokens=max_tokens - ) + if cfg.use_azure: + response = openai.ChatCompletion.create( + deployment_id=cfg.openai_deployment_id, + model=model, + messages=messages, + temperature=temperature, + max_tokens=max_tokens + ) + else: + response = openai.ChatCompletion.create( + model=model, + messages=messages, + temperature=temperature, + max_tokens=max_tokens + ) return response.choices[0].message["content"] From ffb95eb0310f67964562853943a512e069432ab1 Mon Sep 17 00:00:00 2001 From: Fabrice Hong Date: Wed, 5 Apr 2023 20:16:28 +0200 Subject: [PATCH 16/69] fix(json_parser): remove the tab character that can be present in the generated json. 
It makes the json.loads function throw an Invalid JSON error --- scripts/json_parser.py | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/json_parser.py b/scripts/json_parser.py index 11ff9ed22..f7d607a1b 100644 --- a/scripts/json_parser.py +++ b/scripts/json_parser.py @@ -24,6 +24,7 @@ def fix_and_parse_json(json_str: str, try_to_fix_with_gpt: bool = True): """ try: + json_str = json_str.replace('\t', '') return json.loads(json_str) except Exception as e: # Let's do something manually - sometimes GPT responds with something BEFORE the braces: From 051be4df1007c72d2870f76b47c8ac0856d282cc Mon Sep 17 00:00:00 2001 From: Fabrice Hong Date: Wed, 5 Apr 2023 20:18:06 +0200 Subject: [PATCH 17/69] fix(json_parser): fixing the "TypeError('the JSON object must be str, bytes or bytearray, not dict')" after a json_fix is successful --- scripts/json_parser.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/json_parser.py b/scripts/json_parser.py index 11ff9ed22..763a4789f 100644 --- a/scripts/json_parser.py +++ b/scripts/json_parser.py @@ -67,7 +67,8 @@ def fix_json(json_str: str, schema: str, debug=False) -> str: print(f"Fixed JSON: {result_string}") print("----------- END OF FIX ATTEMPT ----------------") try: - return json.loads(result_string) + json.loads(result_string) # just check the validity + return result_string except: # Get the call stack: # import traceback From 475671d1e846a370a38e2044a73aaa1da417fd9a Mon Sep 17 00:00:00 2001 From: douglas Date: Mon, 3 Apr 2023 20:31:01 -0400 Subject: [PATCH 18/69] Pinecone memory and memory usage tracking --- .env.template | 2 ++ README.md | 29 +++++++++++++++++++ requirements.txt | 3 +- scripts/chat.py | 37 ++++++++++++++++-------- scripts/commands.py | 38 ++----------------------- scripts/config.py | 12 +++++++- scripts/data/prompt.txt | 38 ++++++++++++------------- scripts/main.py | 17 +++++++++-- scripts/memory.py | 62 ++++++++++++++++++++++++++++++++++++++++- 9 files changed, 167 insertions(+), 71 deletions(-) diff --git a/.env.template b/.env.template index c64d85028..9fbffbcd4 100644 --- a/.env.template +++ b/.env.template @@ -1,3 +1,5 @@ +PINECONE_API_KEY=your-pinecone-api-key +PINECONE_ENV=your-pinecone-region OPENAI_API_KEY=your-openai-api-key ELEVENLABS_API_KEY=your-elevenlabs-api-key SMART_LLM_MODEL="gpt-4" diff --git a/README.md b/README.md index d66a60222..3af539c85 100644 --- a/README.md +++ b/README.md @@ -139,6 +139,35 @@ export CUSTOM_SEARCH_ENGINE_ID="YOUR_CUSTOM_SEARCH_ENGINE_ID" ``` +## 🌲 Pinecone API Key Setup + +Pinecone enable a vector based memory so a vast memory can be stored and only relevant memories +are loaded for the agent at any given time. + +1. Go to app.pinecone.io and make an account if you don't already have one. +2. Choose the `Starter` plan to avoid being charged. +3. Find your API key and region under the default project in the left sidebar. + +### Setting up environment variables + For Windows Users: +``` +setx PINECONE_API_KEY "YOUR_GOOGLE_API_KEY" +export PINECONE_ENV="Your region" # something like: us-east4-gcp + +``` +For macOS and Linux users: +``` +export PINECONE_API_KEY="YOUR_GOOGLE_API_KEY" +export PINECONE_ENV="Your region" # something like: us-east4-gcp + +``` + +Or you can set them in the `.env` file. + +## View Memory Usage + +1. View memory usage by using the `--debug` flag :) + ## πŸ’€ Continuous Mode ⚠️ Run the AI **without** user authorisation, 100% automated. Continuous mode is not recommended. 
diff --git a/requirements.txt b/requirements.txt index 158e93241..2efb371cc 100644 --- a/requirements.txt +++ b/requirements.txt @@ -10,4 +10,5 @@ tiktoken==0.3.3 gTTS==2.3.1 docker duckduckgo-search -google-api-python-client #(https://developers.google.com/custom-search/v1/overview) +google-api-python-client #(https://developers.google.com/custom-search/v1/overview) +pinecone-client==2.2.1 diff --git a/scripts/chat.py b/scripts/chat.py index 86a70b093..8da074c6b 100644 --- a/scripts/chat.py +++ b/scripts/chat.py @@ -23,6 +23,19 @@ def create_chat_message(role, content): return {"role": role, "content": content} +def generate_context(prompt, relevant_memory, full_message_history, model): + current_context = [ + create_chat_message( + "system", prompt), create_chat_message( + "system", f"Permanent memory: {relevant_memory}")] + + # Add messages from the full message history until we reach the token limit + next_message_to_add_index = len(full_message_history) - 1 + insertion_index = len(current_context) + # Count the currently used tokens + current_tokens_used = token_counter.count_message_tokens(current_context, model) + return next_message_to_add_index, current_tokens_used, insertion_index, current_context + # TODO: Change debug from hardcode to argument def chat_with_ai( @@ -41,7 +54,7 @@ def chat_with_ai( prompt (str): The prompt explaining the rules to the AI. user_input (str): The input from the user. full_message_history (list): The list of all messages sent between the user and the AI. - permanent_memory (list): The list of items in the AI's permanent memory. + permanent_memory (Obj): The memory object containing the permanent memory. token_limit (int): The maximum number of tokens allowed in the API call. Returns: @@ -53,18 +66,20 @@ def chat_with_ai( print(f"Token limit: {token_limit}") send_token_limit = token_limit - 1000 - current_context = [ - create_chat_message( - "system", prompt), create_chat_message( - "system", f"Permanent memory: {permanent_memory}")] + relevant_memory = permanent_memory.get_relevant(str(full_message_history[-5:]), 10) - # Add messages from the full message history until we reach the token limit - next_message_to_add_index = len(full_message_history) - 1 - current_tokens_used = 0 - insertion_index = len(current_context) + if debug: + print('Memory Stats: ', permanent_memory.get_stats()) + + next_message_to_add_index, current_tokens_used, insertion_index, current_context = generate_context( + prompt, relevant_memory, full_message_history, model) + + while current_tokens_used > 2500: + # remove memories until we are under 2500 tokens + relevant_memory = relevant_memory[1:] + next_message_to_add_index, current_tokens_used, insertion_index, current_context = generate_context( + prompt, relevant_memory, full_message_history, model) - # Count the currently used tokens - current_tokens_used = token_counter.count_message_tokens(current_context, model) current_tokens_used += token_counter.count_message_tokens([create_chat_message("user", user_input)], model) # Account for user input (appended later) while next_message_to_add_index >= 0: diff --git a/scripts/commands.py b/scripts/commands.py index f8f96fe72..0a7d27f49 100644 --- a/scripts/commands.py +++ b/scripts/commands.py @@ -1,6 +1,6 @@ import browse import json -import memory as mem +from memory import PineconeMemory import datetime import agent_manager as agents import speak @@ -45,6 +45,7 @@ def get_command(response): def execute_command(command_name, arguments): + memory = PineconeMemory() try: if 
command_name == "google": @@ -55,11 +56,7 @@ def execute_command(command_name, arguments): else: return google_search(arguments["input"]) elif command_name == "memory_add": - return commit_memory(arguments["string"]) - elif command_name == "memory_del": - return delete_memory(arguments["key"]) - elif command_name == "memory_ovr": - return overwrite_memory(arguments["key"], arguments["string"]) + return memory.add(arguments["string"]) elif command_name == "start_agent": return start_agent( arguments["name"], @@ -176,35 +173,6 @@ def get_hyperlinks(url): return link_list -def commit_memory(string): - _text = f"""Committing memory with string "{string}" """ - mem.permanent_memory.append(string) - return _text - - -def delete_memory(key): - if key >= 0 and key < len(mem.permanent_memory): - _text = "Deleting memory with key " + str(key) - del mem.permanent_memory[key] - print(_text) - return _text - else: - print("Invalid key, cannot delete memory.") - return None - - -def overwrite_memory(key, string): - if int(key) >= 0 and key < len(mem.permanent_memory): - _text = "Overwriting memory with key " + \ - str(key) + " and string " + string - mem.permanent_memory[key] = string - print(_text) - return _text - else: - print("Invalid key, cannot overwrite memory.") - return None - - def shutdown(): print("Shutting down...") quit() diff --git a/scripts/config.py b/scripts/config.py index 766cb94f4..4334f03ac 100644 --- a/scripts/config.py +++ b/scripts/config.py @@ -4,6 +4,7 @@ from dotenv import load_dotenv # Load environment variables from .env file load_dotenv() + class Singleton(type): """ Singleton metaclass for ensuring only one instance of a class. @@ -39,6 +40,9 @@ class Config(metaclass=Singleton): self.google_api_key = os.getenv("GOOGLE_API_KEY") self.custom_search_engine_id = os.getenv("CUSTOM_SEARCH_ENGINE_ID") + self.pinecone_api_key = os.getenv("PINECONE_API_KEY") + self.pinecone_region = os.getenv("PINECONE_ENV") + # Initialize the OpenAI API client openai.api_key = self.openai_api_key @@ -70,4 +74,10 @@ class Config(metaclass=Singleton): self.google_api_key = value def set_custom_search_engine_id(self, value: str): - self.custom_search_engine_id = value \ No newline at end of file + self.custom_search_engine_id = value + + def set_pinecone_api_key(self, value: str): + self.pinecone_api_key = value + + def set_pinecone_region(self, value: str): + self.pinecone_region = value diff --git a/scripts/data/prompt.txt b/scripts/data/prompt.txt index a93e783eb..9f31fad79 100644 --- a/scripts/data/prompt.txt +++ b/scripts/data/prompt.txt @@ -1,29 +1,27 @@ CONSTRAINTS: -1. ~4000 word limit for memory. Your memory is short, so immediately save important information to long term memory and code to files. -2. No user assistance -3. Exclusively use the commands listed in double quotes e.g. "command name" +1. ~4000 word limit for short term memory. Your short term memory is short, so immediately save important information to files. +2. If you are unsure how you previously did something or want to recall past events, thinking about similar events will help you remember. +3. No user assistance +4. Exclusively use the commands listed in double quotes e.g. "command name" COMMANDS: 1. Google Search: "google", args: "input": "" -2. Memory Add: "memory_add", args: "string": "" -3. Memory Delete: "memory_del", args: "key": "" -4. Memory Overwrite: "memory_ovr", args: "key": "", "string": "" -5. Browse Website: "browse_website", args: "url": "", "question": "" -6. 
Start GPT Agent: "start_agent", args: "name": , "task": "", "prompt": "" -7. Message GPT Agent: "message_agent", args: "key": "", "message": "" -8. List GPT Agents: "list_agents", args: "" -9. Delete GPT Agent: "delete_agent", args: "key": "" -10. Write to file: "write_to_file", args: "file": "", "text": "" -11. Read file: "read_file", args: "file": "" -12. Append to file: "append_to_file", args: "file": "", "text": "" -13. Delete file: "delete_file", args: "file": "" -14. Evaluate Code: "evaluate_code", args: "code": "" -15. Get Improved Code: "improve_code", args: "suggestions": "", "code": "" -16. Write Tests: "write_tests", args: "code": "", "focus": "" -17. Execute Python File: "execute_python_file", args: "file": "" -18. Task Complete (Shutdown): "task_complete", args: "reason": "" +2. Browse Website: "browse_website", args: "url": "", "question": "" +3. Start GPT Agent: "start_agent", args: "name": , "task": "", "prompt": "" +4. Message GPT Agent: "message_agent", args: "key": "", "message": "" +5. List GPT Agents: "list_agents", args: "" +6. Delete GPT Agent: "delete_agent", args: "key": "" +7. Write to file: "write_to_file", args: "file": "", "text": "" +8. Read file: "read_file", args: "file": "" +9. Append to file: "append_to_file", args: "file": "", "text": "" +10. Delete file: "delete_file", args: "file": "" +11. Evaluate Code: "evaluate_code", args: "code": "" +12. Get Improved Code: "improve_code", args: "suggestions": "", "code": "" +13. Write Tests: "write_tests", args: "code": "", "focus": "" +14. Execute Python File: "execute_python_file", args: "file": "" +15. Task Complete (Shutdown): "task_complete", args: "reason": "" RESOURCES: diff --git a/scripts/main.py b/scripts/main.py index 93124234e..2b76842f5 100644 --- a/scripts/main.py +++ b/scripts/main.py @@ -1,7 +1,7 @@ import json import random import commands as cmd -import memory as mem +from memory import PineconeMemory import data import chat from colorama import Fore, Style @@ -280,6 +280,13 @@ result = None # Make a constant: user_input = "Determine which next command to use, and respond using the format specified above:" +# Initialize memory and make sure it is empty. +# this is particularly important for indexing and referencing pinecone memory +memory = PineconeMemory() +memory.clear() + +print('Using memory of type: ' + memory.__class__.__name__) + # Interaction Loop while True: # Send message to AI, get response @@ -288,7 +295,7 @@ while True: prompt, user_input, full_message_history, - mem.permanent_memory, + memory, cfg.fast_token_limit) # TODO: This hardcodes the model to use GPT3.5. 
Make this an argument # print("assistant reply: "+assistant_reply) @@ -349,6 +356,12 @@ while True: else: result = f"Command {command_name} returned: {cmd.execute_command(command_name, arguments)}" + memory_to_add = f"Assistant Reply: {assistant_reply} " \ + f"\nResult: {result} " \ + f"\nHuman Feedback: {user_input} " + + memory.add(memory_to_add) + # Check if there's a result from the command append it to the message # history if result is not None: diff --git a/scripts/memory.py b/scripts/memory.py index 0dc5b7666..0d265a31d 100644 --- a/scripts/memory.py +++ b/scripts/memory.py @@ -1 +1,61 @@ -permanent_memory = [] +from config import Config, Singleton +import pinecone +import openai + +cfg = Config() + + +def get_ada_embedding(text): + text = text.replace("\n", " ") + return openai.Embedding.create(input=[text], model="text-embedding-ada-002")["data"][0]["embedding"] + + +def get_text_from_embedding(embedding): + return openai.Embedding.retrieve(embedding, model="text-embedding-ada-002")["data"][0]["text"] + + +class PineconeMemory(metaclass=Singleton): + def __init__(self): + pinecone_api_key = cfg.pinecone_api_key + pinecone_region = cfg.pinecone_region + pinecone.init(api_key=pinecone_api_key, environment=pinecone_region) + dimension = 1536 + metric = "cosine" + pod_type = "p1" + table_name = "auto-gpt" + # this assumes we don't start with memory. + # for now this works. + # we'll need a more complicated and robust system if we want to start with memory. + self.vec_num = 0 + if table_name not in pinecone.list_indexes(): + pinecone.create_index(table_name, dimension=dimension, metric=metric, pod_type=pod_type) + self.index = pinecone.Index(table_name) + + def add(self, data): + vector = get_ada_embedding(data) + # no metadata here. We may wish to change that long term. + resp = self.index.upsert([(str(self.vec_num), vector, {"raw_text": data})]) + _text = f"Inserting data into memory at index: {self.vec_num}:\n data: {data}" + self.vec_num += 1 + return _text + + def get(self, data): + return self.get_relevant(data, 1) + + def clear(self): + self.index.delete(deleteAll=True) + return "Obliviated" + + def get_relevant(self, data, num_relevant=5): + """ + Returns all the data in the memory that is relevant to the given data. + :param data: The data to compare to. + :param num_relevant: The number of relevant data to return. Defaults to 5 + """ + query_embedding = get_ada_embedding(data) + results = self.index.query(query_embedding, top_k=num_relevant, include_metadata=True) + sorted_results = sorted(results.matches, key=lambda x: x.score) + return [str(item['metadata']["raw_text"]) for item in sorted_results] + + def get_stats(self): + return self.index.describe_index_stats() From 92b6fd9d160b43e962bfba1c2dbc9a6072d4d78b Mon Sep 17 00:00:00 2001 From: David Wurtz Date: Wed, 5 Apr 2023 13:40:09 -0700 Subject: [PATCH 19/69] first draft of github issue templates --- .github/ISSUE_TEMPLATE/1.bug.yml | 39 ++++++++++++++++++++++++++++ .github/ISSUE_TEMPLATE/2.feature.yml | 29 +++++++++++++++++++++ 2 files changed, 68 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE/1.bug.yml create mode 100644 .github/ISSUE_TEMPLATE/2.feature.yml diff --git a/.github/ISSUE_TEMPLATE/1.bug.yml b/.github/ISSUE_TEMPLATE/1.bug.yml new file mode 100644 index 000000000..cf49ab5f8 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/1.bug.yml @@ -0,0 +1,39 @@ +name: Bug report πŸ› +description: Create a bug report for Auto-GPT. 
+labels: ['status: needs triage'] +body: + - type: markdown + attributes: + value: | + Please provide a searchable summary of the issue in the title above ⬆️. + + Thanks for contributing by creating an issue! ❀️ + - type: checkboxes + attributes: + label: Duplicates + description: Please [search the history](https://github.com/Torantulino/Auto-GPT/issues) to see if an issue already exists for the same problem. + options: + - label: I have searched the existing issues + required: true + - type: textarea + attributes: + label: Steps to reproduce πŸ•Ή + description: | + **⚠️ Issues that we can't reproduce will be closed.** + - type: textarea + attributes: + label: Current behavior 😯 + description: Describe what happens instead of the expected behavior. + - type: textarea + attributes: + label: Expected behavior πŸ€” + description: Describe what should happen. + - type: textarea + attributes: + label: Your prompt πŸ“ + description: | + Please provide the prompt you are using. You can find your last-used prompt in last_run_ai_settings.yaml. + value: | + ```yaml + # Paste your prompt here + ``` \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/2.feature.yml b/.github/ISSUE_TEMPLATE/2.feature.yml new file mode 100644 index 000000000..0ea882ef6 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/2.feature.yml @@ -0,0 +1,29 @@ +name: Feature request πŸš€ +description: Suggest a new idea for Auto-GPT. +labels: ['status: needs triage'] +body: + - type: markdown + attributes: + value: | + Please provide a searchable summary of the issue in the title above ⬆️. + + Thanks for contributing by creating an issue! ❀️ + - type: checkboxes + attributes: + label: Duplicates + description: Please [search the history](https://github.com/Torantulino/Auto-GPT/issues) to see if an issue already exists for the same problem. + options: + - label: I have searched the existing issues + required: true + - type: textarea + attributes: + label: Summary πŸ’‘ + description: Describe how it should work. + - type: textarea + attributes: + label: Examples 🌈 + description: Provide a link to other implementations, or screenshots of the expected behavior. + - type: textarea + attributes: + label: Motivation πŸ”¦ + description: What are you trying to accomplish? How has the lack of this feature affected you? Providing context helps us come up with a solution that is more useful in the real world. \ No newline at end of file From 62854afe681063d12d915ed6da19ac756af78506 Mon Sep 17 00:00:00 2001 From: Petar Ostojic Date: Wed, 5 Apr 2023 23:44:16 +0200 Subject: [PATCH 20/69] Added fake user-agent headers to browser request. 
--- scripts/browse.py | 4 ++-- scripts/config.py | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/scripts/browse.py b/scripts/browse.py index 510f9c29d..0fda3d7b0 100644 --- a/scripts/browse.py +++ b/scripts/browse.py @@ -6,7 +6,7 @@ from llm_utils import create_chat_completion cfg = Config() def scrape_text(url): - response = requests.get(url) + response = requests.get(url, headers=cfg.user_agent_header) # Check if the response contains an HTTP error if response.status_code >= 400: @@ -40,7 +40,7 @@ def format_hyperlinks(hyperlinks): def scrape_links(url): - response = requests.get(url) + response = requests.get(url, headers=cfg.user_agent_header) # Check if the response contains an HTTP error if response.status_code >= 400: diff --git a/scripts/config.py b/scripts/config.py index 766cb94f4..4a4497694 100644 --- a/scripts/config.py +++ b/scripts/config.py @@ -39,6 +39,11 @@ class Config(metaclass=Singleton): self.google_api_key = os.getenv("GOOGLE_API_KEY") self.custom_search_engine_id = os.getenv("CUSTOM_SEARCH_ENGINE_ID") + # User agent headers to use when browsing web + # Some websites might just completely deny request with an error code if no user agent was found. + self.user_agent_header = {"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"} + + # Initialize the OpenAI API client openai.api_key = self.openai_api_key From 962fc9a42a8d3e69b3a14f4791cf8a84b67f5504 Mon Sep 17 00:00:00 2001 From: Toran Bruce Richards Date: Thu, 6 Apr 2023 08:07:54 +0100 Subject: [PATCH 21/69] Changes string_key_memory to permanent_memory. Fixes: ```Command memory_ovr returned: Error: module 'memory' has no attribute 'string_key_memory'``` --- scripts/commands.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/commands.py b/scripts/commands.py index 2adb84cf5..f0e5911e8 100644 --- a/scripts/commands.py +++ b/scripts/commands.py @@ -218,7 +218,7 @@ def overwrite_memory(key, string): elif isinstance(key, str): _text = "Overwriting memory with key " + key + " and string " + string # Overwrite the memory slot with the given string key and string - mem.string_key_memory[key] = string + mem.permanent_memory[key] = string print(_text) return _text else: From 34b6f47f7128c1eb69309beddab6dac65a0d1236 Mon Sep 17 00:00:00 2001 From: Peter Edwards Date: Thu, 6 Apr 2023 09:15:45 +0200 Subject: [PATCH 22/69] Fix for boolean eval from .env --- scripts/config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/config.py b/scripts/config.py index d97ded9ca..a2ddd4309 100644 --- a/scripts/config.py +++ b/scripts/config.py @@ -35,7 +35,7 @@ class Config(metaclass=Singleton): self.openai_api_key = os.getenv("OPENAI_API_KEY") self.use_azure = False - self.use_azure = os.getenv("USE_AZURE") + self.use_azure = os.getenv("USE_AZURE") == 'True' if self.use_azure: self.openai_api_base = os.getenv("OPENAI_API_BASE") self.openai_api_version = os.getenv("OPENAI_API_VERSION") From dcc29a5568a9d026648657859a8ece1043430952 Mon Sep 17 00:00:00 2001 From: Toran Bruce Richards Date: Thu, 6 Apr 2023 08:29:28 +0100 Subject: [PATCH 23/69] Fixes broken reference to speak.say_text --- scripts/commands.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/commands.py b/scripts/commands.py index f0e5911e8..b0b2f3a6d 100644 --- a/scripts/commands.py +++ b/scripts/commands.py @@ -268,7 +268,7 @@ def message_agent(key, message): # Speak response if cfg.speak_mode: - 
say.speak(agent_response) + speak.say_text(agent_response, 1) return agent_response From a55a64c1c9a8bc1ed6ea72f1bf42dbca375247ee Mon Sep 17 00:00:00 2001 From: Toran Bruce Richards Date: Thu, 6 Apr 2023 08:46:57 +0100 Subject: [PATCH 24/69] Adds voice index back in. Agents now have a different voice again. --- scripts/speak.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/speak.py b/scripts/speak.py index f6242a37d..13517d366 100644 --- a/scripts/speak.py +++ b/scripts/speak.py @@ -42,7 +42,7 @@ def say_text(text, voice_index=0): if not cfg.elevenlabs_api_key: gtts_speech(text) else: - success = eleven_labs_speech(text) + success = eleven_labs_speech(text, voice_index) if not success: gtts_speech(text) From b8f2dd59c8f54ce7efa068b50117212a121b11ac Mon Sep 17 00:00:00 2001 From: Toran Bruce Richards Date: Thu, 6 Apr 2023 09:23:03 +0100 Subject: [PATCH 25/69] Fixes "/" search not working. --- scripts/file_operations.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/scripts/file_operations.py b/scripts/file_operations.py index f3cd3a458..90c9a1e4d 100644 --- a/scripts/file_operations.py +++ b/scripts/file_operations.py @@ -61,7 +61,11 @@ def delete_file(filename): def search_files(directory): found_files = [] - search_directory = safe_join(working_directory, directory) + + if directory == "" or directory == "/": + search_directory = working_directory + else: + search_directory = safe_join(working_directory, directory) for root, _, files in os.walk(search_directory): for file in files: From a45ed8c42a86ca15387c6b4ec6575254e194e6a1 Mon Sep 17 00:00:00 2001 From: Toran Bruce Richards Date: Thu, 6 Apr 2023 11:46:13 +0100 Subject: [PATCH 26/69] Moves last_run_ai_settings.yaml to root so it's easier to find and use. --- scripts/ai_config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/ai_config.py b/scripts/ai_config.py index 945fcfb23..678d3ab99 100644 --- a/scripts/ai_config.py +++ b/scripts/ai_config.py @@ -9,7 +9,7 @@ class AIConfig: self.ai_goals = ai_goals # Soon this will go in a folder where it remembers more stuff about the run(s) - SAVE_FILE = "last_run_ai_settings.yaml" + SAVE_FILE = "../last_run_ai_settings.yaml" @classmethod def load(cls, config_file=SAVE_FILE): From 61685c6f0b04064447125b42732b776e4180cba3 Mon Sep 17 00:00:00 2001 From: Toran Bruce Richards Date: Thu, 6 Apr 2023 11:47:12 +0100 Subject: [PATCH 27/69] Renames last_run_ai_settings to ai_settings. Hopefully this helps people realise they can edit it themselves. --- scripts/ai_config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/ai_config.py b/scripts/ai_config.py index 678d3ab99..2f4327486 100644 --- a/scripts/ai_config.py +++ b/scripts/ai_config.py @@ -9,7 +9,7 @@ class AIConfig: self.ai_goals = ai_goals # Soon this will go in a folder where it remembers more stuff about the run(s) - SAVE_FILE = "../last_run_ai_settings.yaml" + SAVE_FILE = "../ai_settings.yaml" @classmethod def load(cls, config_file=SAVE_FILE): From a83f148878bb841dcc05f70fc5da6f859688a9c0 Mon Sep 17 00:00:00 2001 From: Toran Bruce Richards Date: Thu, 6 Apr 2023 11:53:47 +0100 Subject: [PATCH 28/69] Adds default ai_settings file. 
--- ai_settings.yaml | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 ai_settings.yaml diff --git a/ai_settings.yaml b/ai_settings.yaml new file mode 100644 index 000000000..1d29bd698 --- /dev/null +++ b/ai_settings.yaml @@ -0,0 +1,7 @@ +ai_goals: +- Increase net worth +- Grow Twitter Account +- Develop and manage multiple businesses autonomously +ai_name: Entrepreneur-GPT +ai_role: an AI designed to autonomously develop and run businesses with the sole goal + of increasing your net worth. From c41acbad4a4679db687d558deb789850e1c754ad Mon Sep 17 00:00:00 2001 From: Toran Bruce Richards Date: Thu, 6 Apr 2023 11:53:58 +0100 Subject: [PATCH 29/69] Tweaks default settings. --- ai_settings.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ai_settings.yaml b/ai_settings.yaml index 1d29bd698..b37ba849f 100644 --- a/ai_settings.yaml +++ b/ai_settings.yaml @@ -1,7 +1,7 @@ ai_goals: -- Increase net worth -- Grow Twitter Account -- Develop and manage multiple businesses autonomously +- Increase net worth. +- Develop and manage multiple businesses autonomously. +- Play to your strengths as a Large Language Model. ai_name: Entrepreneur-GPT ai_role: an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth. From 89ab8397bf0185a5991cb02209a3fd42e8c4fa20 Mon Sep 17 00:00:00 2001 From: Toran Bruce Richards Date: Thu, 6 Apr 2023 11:56:28 +0100 Subject: [PATCH 30/69] Ignores ai_settings.yaml --- .gitignore | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.gitignore b/.gitignore index 55a21afeb..7091a8723 100644 --- a/.gitignore +++ b/.gitignore @@ -7,5 +7,5 @@ package-lock.json auto_gpt_workspace/* *.mpeg .env -last_run_ai_settings.yaml -outputs/* \ No newline at end of file +outputs/* +ai_settings.yaml \ No newline at end of file From 5987d6297b29988349a9aa05388d9612d58c3016 Mon Sep 17 00:00:00 2001 From: Toran Bruce Richards Date: Thu, 6 Apr 2023 12:20:57 +0100 Subject: [PATCH 31/69] Update CONTRIBUTING.md --- CONTRIBUTING.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 73d38d67c..001d55470 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -16,6 +16,7 @@ git checkout -b "branch-name" 4. Make your changes to the code or documentation. - Example: Improve User Interface or Add Documentation. + 5. Add the changes to the staging area using the following command: ``` git add . From 32e20611dfa18b4dbea223876ef3bd8d49b1b28b Mon Sep 17 00:00:00 2001 From: Richard Beales Date: Thu, 6 Apr 2023 12:26:07 +0100 Subject: [PATCH 32/69] small typo in Pinecone settings that referred to google --- README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 790312991..a89c5d03b 100644 --- a/README.md +++ b/README.md @@ -152,14 +152,14 @@ are loaded for the agent at any given time. 
### Setting up environment variables For Windows Users: ``` -setx PINECONE_API_KEY "YOUR_GOOGLE_API_KEY" -export PINECONE_ENV="Your region" # something like: us-east4-gcp +setx PINECONE_API_KEY "YOUR_PINECONE_API_KEY" +export PINECONE_ENV="Your pinecone region" # something like: us-east4-gcp ``` For macOS and Linux users: ``` -export PINECONE_API_KEY="YOUR_GOOGLE_API_KEY" -export PINECONE_ENV="Your region" # something like: us-east4-gcp +export PINECONE_API_KEY="YOUR_PINECONE_API_KEY" +export PINECONE_ENV="Your pinecone region" # something like: us-east4-gcp ``` From 3f106963a8b461a52ec1d9acb3a3273cac3ad6d7 Mon Sep 17 00:00:00 2001 From: Toran Bruce Richards Date: Thu, 6 Apr 2023 13:22:05 +0100 Subject: [PATCH 33/69] Changes playsound requirement to 1.2.2 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 2efb371cc..ce2470985 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,7 @@ beautifulsoup4 colorama==0.4.6 openai==0.27.2 -playsound==1.3.0 +playsound==1.2.2 python-dotenv==1.0.0 pyyaml==6.0 readability-lxml==0.8.1 From f8e88a146a12754221517246d0a161e223db795a Mon Sep 17 00:00:00 2001 From: hunteraraujo Date: Thu, 6 Apr 2023 09:59:20 -0700 Subject: [PATCH 34/69] Create PULL_REQUEST_TEMPLATE.md --- .github/PULL_REQUEST_TEMPLATE.md | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 .github/PULL_REQUEST_TEMPLATE.md diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 000000000..6397f78c0 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,18 @@ +### Background + + + +### Changes + + + +### Test Plan + + + +### Change Safety + +- [ ] I have added tests to cover my changes +- [ ] I have considered potential risks and mitigations for my changes + + From 85428df9fc1dd72bf5a959c92e37803fa9fcc8bc Mon Sep 17 00:00:00 2001 From: hunteraraujo Date: Thu, 6 Apr 2023 10:07:49 -0700 Subject: [PATCH 35/69] Update PULL_REQUEST_TEMPLATE.md --- .github/PULL_REQUEST_TEMPLATE.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 6397f78c0..cb8ce34a1 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -8,7 +8,7 @@ ### Test Plan - + ### Change Safety From 6819799ebe435e8e512bf07f1cd1d52f58a1b2f9 Mon Sep 17 00:00:00 2001 From: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com> Date: Thu, 6 Apr 2023 22:25:17 -0500 Subject: [PATCH 36/69] Create an abstract MemoryProviderSingleton class. Pass config instead of instantiating a new one where used. 
--- scripts/commands.py | 4 +-- scripts/config.py | 7 ++++- scripts/main.py | 4 +-- scripts/memory/__init__.py | 0 scripts/memory/base.py | 34 +++++++++++++++++++++++ scripts/{memory.py => memory/pinecone.py} | 18 +++--------- scripts/memory/redismem.py | 0 7 files changed, 48 insertions(+), 19 deletions(-) create mode 100644 scripts/memory/__init__.py create mode 100644 scripts/memory/base.py rename scripts/{memory.py => memory/pinecone.py} (80%) create mode 100644 scripts/memory/redismem.py diff --git a/scripts/commands.py b/scripts/commands.py index fc10d1d05..f00875f06 100644 --- a/scripts/commands.py +++ b/scripts/commands.py @@ -1,6 +1,6 @@ import browse import json -from memory import PineconeMemory +from memory.pinecone import PineconeMemory import datetime import agent_manager as agents import speak @@ -52,7 +52,7 @@ def get_command(response): def execute_command(command_name, arguments): - memory = PineconeMemory() + memory = PineconeMemory(cfg=cfg) try: if command_name == "google": diff --git a/scripts/config.py b/scripts/config.py index fe48d2980..1b716a3eb 100644 --- a/scripts/config.py +++ b/scripts/config.py @@ -1,3 +1,4 @@ +import abc import os import openai from dotenv import load_dotenv @@ -5,7 +6,7 @@ from dotenv import load_dotenv load_dotenv() -class Singleton(type): +class Singleton(abc.ABCMeta, type): """ Singleton metaclass for ensuring only one instance of a class. """ @@ -20,6 +21,10 @@ class Singleton(type): return cls._instances[cls] +class AbstractSingleton(abc.ABC, metaclass=Singleton): + pass + + class Config(metaclass=Singleton): """ Configuration class to store the state of bools for different scripts access. diff --git a/scripts/main.py b/scripts/main.py index a79fd553c..acb63a39b 100644 --- a/scripts/main.py +++ b/scripts/main.py @@ -1,7 +1,7 @@ import json import random import commands as cmd -from memory import PineconeMemory +from memory.pinecone import PineconeMemory import data import chat from colorama import Fore, Style @@ -283,7 +283,7 @@ user_input = "Determine which next command to use, and respond using the format # Initialize memory and make sure it is empty. 
# this is particularly important for indexing and referencing pinecone memory -memory = PineconeMemory() +memory = PineconeMemory(cfg) memory.clear() print('Using memory of type: ' + memory.__class__.__name__) diff --git a/scripts/memory/__init__.py b/scripts/memory/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/scripts/memory/base.py b/scripts/memory/base.py new file mode 100644 index 000000000..29f5d56be --- /dev/null +++ b/scripts/memory/base.py @@ -0,0 +1,34 @@ +import abc +from config import AbstractSingleton +import openai + + +def get_ada_embedding(text): + text = text.replace("\n", " ") + return openai.Embedding.create(input=[text], model="text-embedding-ada-002")["data"][0]["embedding"] + + +def get_text_from_embedding(embedding): + return openai.Embedding.retrieve(embedding, model="text-embedding-ada-002")["data"][0]["text"] + + +class MemoryProviderSingleton(AbstractSingleton): + @abc.abstractmethod + def add(self, data): + pass + + @abc.abstractmethod + def get(self, data): + pass + + @abc.abstractmethod + def clear(self): + pass + + @abc.abstractmethod + def get_relevant(self, data, num_relevant=5): + pass + + @abc.abstractmethod + def get_stats(self): + pass diff --git a/scripts/memory.py b/scripts/memory/pinecone.py similarity index 80% rename from scripts/memory.py rename to scripts/memory/pinecone.py index 0d265a31d..8e1eaa570 100644 --- a/scripts/memory.py +++ b/scripts/memory/pinecone.py @@ -1,21 +1,11 @@ -from config import Config, Singleton + import pinecone -import openai -cfg = Config() +from memory.base import MemoryProviderSingleton, get_ada_embedding -def get_ada_embedding(text): - text = text.replace("\n", " ") - return openai.Embedding.create(input=[text], model="text-embedding-ada-002")["data"][0]["embedding"] - - -def get_text_from_embedding(embedding): - return openai.Embedding.retrieve(embedding, model="text-embedding-ada-002")["data"][0]["text"] - - -class PineconeMemory(metaclass=Singleton): - def __init__(self): +class PineconeMemory(MemoryProviderSingleton): + def __init__(self, cfg): pinecone_api_key = cfg.pinecone_api_key pinecone_region = cfg.pinecone_region pinecone.init(api_key=pinecone_api_key, environment=pinecone_region) diff --git a/scripts/memory/redismem.py b/scripts/memory/redismem.py new file mode 100644 index 000000000..e69de29bb From 57412bcf4e85c7edff4f021c34390658ee80eb06 Mon Sep 17 00:00:00 2001 From: blankey1337 <42594751+blankey1337@users.noreply.github.com> Date: Thu, 6 Apr 2023 21:16:05 -0700 Subject: [PATCH 37/69] add kandinsky support - cuda issue --- scripts/image_gen.py | 44 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) create mode 100644 scripts/image_gen.py diff --git a/scripts/image_gen.py b/scripts/image_gen.py new file mode 100644 index 000000000..cdc4fc4d7 --- /dev/null +++ b/scripts/image_gen.py @@ -0,0 +1,44 @@ +from kandinsky2 import get_kandinsky2 +from config import Config + +cfg = Config() + +def generate_image(prompt): + + model = get_kandinsky2('cuda', task_type='text2img', model_version='2.1', use_flash_attention=False) + images = model.generate_text2img( + "red cat, 4k photo", # prompt + num_steps=100, + batch_size=1, + guidance_scale=4, + h=768, w=768, + sampler='p_sampler', + prior_cf_scale=4, + prior_steps="5" + ) + return images + + # base_url = 'http://export.arxiv.org/api/query?' 
+ # query = f'search_query=all:{search_query}&start=0&max_results={max_results}' + # url = base_url + query + # response = requests.get(url) + + # if response.status_code == 200: + # soup = BeautifulSoup(response.content, 'xml') + # entries = soup.find_all('entry') + + # articles = [] + # for entry in entries: + # title = entry.title.text.strip() + # url = entry.id.text.strip() + # published = entry.published.text.strip() + + # articles.append({ + # 'title': title, + # 'url': url, + # 'published': published + # }) + + # return articles + # else: + # return None From 5a1d9e6d0a1752cf08cf747f9279f8b316f3a8c4 Mon Sep 17 00:00:00 2001 From: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com> Date: Fri, 7 Apr 2023 00:08:25 -0500 Subject: [PATCH 38/69] Implement redis memory backend. --- README.md | 21 ++++++ requirements.txt | 1 + scripts/commands.py | 6 +- scripts/config.py | 7 +- scripts/main.py | 8 ++- scripts/memory/base.py | 1 + scripts/memory/redismem.py | 135 +++++++++++++++++++++++++++++++++++++ 7 files changed, 175 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index a89c5d03b..921f297ee 100644 --- a/README.md +++ b/README.md @@ -149,6 +149,27 @@ are loaded for the agent at any given time. 2. Choose the `Starter` plan to avoid being charged. 3. Find your API key and region under the default project in the left sidebar. + +## Redis Setup + +Install docker desktop. + +Run: +``` +docker run -d --name redis-stack-server -p 6379:6379 redis/redis-stack-server:latest +``` + +Set the following environment variables: +``` +MEMORY_BACKEND=redis +REDIS_HOST=localhost +REDIS_PORT=6379 +REDIS_PASSWORD= +``` + +Note that this is not intended to be run facing the internet and is not secure, do not expose redis to the internet without a password or at all really. + + ### Setting up environment variables For Windows Users: ``` diff --git a/requirements.txt b/requirements.txt index ce2470985..9cfddad62 100644 --- a/requirements.txt +++ b/requirements.txt @@ -12,3 +12,4 @@ docker duckduckgo-search google-api-python-client #(https://developers.google.com/custom-search/v1/overview) pinecone-client==2.2.1 +redis \ No newline at end of file diff --git a/scripts/commands.py b/scripts/commands.py index f00875f06..98be77727 100644 --- a/scripts/commands.py +++ b/scripts/commands.py @@ -1,6 +1,7 @@ import browse import json from memory.pinecone import PineconeMemory +from memory.redismem import RedisMemory import datetime import agent_manager as agents import speak @@ -52,7 +53,10 @@ def get_command(response): def execute_command(command_name, arguments): - memory = PineconeMemory(cfg=cfg) + if cfg.memory_backend == "pinecone": + memory = PineconeMemory(cfg=cfg) + else: + memory = RedisMemory(cfg=cfg) try: if command_name == "google": diff --git a/scripts/config.py b/scripts/config.py index 1b716a3eb..77498d6c9 100644 --- a/scripts/config.py +++ b/scripts/config.py @@ -61,7 +61,12 @@ class Config(metaclass=Singleton): # User agent headers to use when browsing web # Some websites might just completely deny request with an error code if no user agent was found. self.user_agent_header = {"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"} - + self.redis_host = os.getenv("REDIS_HOST") + self.redis_port = os.getenv("REDIS_PORT") + self.redis_password = os.getenv("REDIS_PASSWORD") + # Note that indexes must be created on db 0 in redis, this is not configureable. 
+ + self.memory_backend = os.getenv("MEMORY_BACKEND", 'pinecone') # Initialize the OpenAI API client openai.api_key = self.openai_api_key diff --git a/scripts/main.py b/scripts/main.py index acb63a39b..eecdd7f80 100644 --- a/scripts/main.py +++ b/scripts/main.py @@ -2,6 +2,7 @@ import json import random import commands as cmd from memory.pinecone import PineconeMemory +from memory.redismem import RedisMemory import data import chat from colorama import Fore, Style @@ -283,8 +284,11 @@ user_input = "Determine which next command to use, and respond using the format # Initialize memory and make sure it is empty. # this is particularly important for indexing and referencing pinecone memory -memory = PineconeMemory(cfg) -memory.clear() +if cfg.memory_backend == "pinecone": + memory = PineconeMemory(cfg) + memory.clear() +else: + memory = RedisMemory(cfg) print('Using memory of type: ' + memory.__class__.__name__) diff --git a/scripts/memory/base.py b/scripts/memory/base.py index 29f5d56be..72349f6be 100644 --- a/scripts/memory/base.py +++ b/scripts/memory/base.py @@ -1,3 +1,4 @@ +"""Base class for memory providers.""" import abc from config import AbstractSingleton import openai diff --git a/scripts/memory/redismem.py b/scripts/memory/redismem.py index e69de29bb..162b9269b 100644 --- a/scripts/memory/redismem.py +++ b/scripts/memory/redismem.py @@ -0,0 +1,135 @@ +"""Redis memory provider.""" +from typing import Any, List, Optional +import redis +from redis.commands.search.field import VectorField, TextField +from redis.commands.search.query import Query +from redis.commands.search.indexDefinition import IndexDefinition, IndexType +import traceback +import numpy as np + +from memory.base import MemoryProviderSingleton, get_ada_embedding + + +SCHEMA = [ + TextField("data"), + VectorField( + "embedding", + "HNSW", + { + "TYPE": "FLOAT32", + "DIM": 1536, + "DISTANCE_METRIC": "COSINE" + } + ), +] + + +class RedisMemory(MemoryProviderSingleton): + def __init__(self, cfg): + """ + Initializes the Redis memory provider. + + Args: + cfg: The config object. + + Returns: None + """ + redis_host = cfg.redis_host + redis_port = cfg.redis_port + redis_password = cfg.redis_password + self.dimension = 1536 + self.redis = redis.Redis( + host=redis_host, + port=redis_port, + password=redis_password, + db=0 # Cannot be changed + ) + self.redis.flushall() + try: + self.redis.ft("gpt").create_index( + fields=SCHEMA, + definition=IndexDefinition( + prefix=["gpt:"], + index_type=IndexType.HASH + ) + ) + except Exception as e: + print("Error creating Redis search index: ", e) + self.vec_num = 0 + + def add(self, data: str) -> str: + """ + Adds a data point to the memory. + + Args: + data: The data to add. + + Returns: Message indicating that the data has been added. + """ + vector = get_ada_embedding(data) + vector = np.array(vector).astype(np.float32).tobytes() + data_dict = { + b"data": data, + "embedding": vector + } + self.redis.hset(f"gpt:{self.vec_num}", mapping=data_dict) + _text = f"Inserting data into memory at index: {self.vec_num}:\n"\ + f"data: {data}" + self.vec_num += 1 + return _text + + def get(self, data: str) -> Optional[List[Any]]: + """ + Gets the data from the memory that is most relevant to the given data. + + Args: + data: The data to compare to. + + Returns: The most relevant data. + """ + return self.get_relevant(data, 1) + + def clear(self) -> str: + """ + Clears the redis server. + + Returns: A message indicating that the memory has been cleared. 
+ """ + self.redis.flushall() + return "Obliviated" + + def get_relevant( + self, + data: str, + num_relevant: int = 5 + ) -> Optional[List[Any]]: + """ + Returns all the data in the memory that is relevant to the given data. + Args: + data: The data to compare to. + num_relevant: The number of relevant data to return. + + Returns: A list of the most relevant data. + """ + query_embedding = get_ada_embedding(data) + base_query = f"*=>[KNN {num_relevant} @embedding $vector AS vector_score]" + query = Query(base_query).return_fields( + "data", + "vector_score" + ).sort_by("vector_score").dialect(2) + query_vector = np.array(query_embedding).astype(np.float32).tobytes() + + try: + results = self.redis.ft("gpt").search( + query, query_params={"vector": query_vector} + ) + except Exception as e: + print("Error calling Redis search: ", e) + return None + return list(results.docs) + + def get_stats(self): + """ + Returns: The stats of the memory index. + """ + return self.redis.ft("mem").info() From cce79695fa43a9abb5aca8e368f7951924c3ae9c Mon Sep 17 00:00:00 2001 From: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com> Date: Fri, 7 Apr 2023 00:48:27 -0500 Subject: [PATCH 39/69] Save redis memory state, with the default being to wipe on start still. --- scripts/config.py | 3 ++- scripts/memory/redismem.py | 12 +++++++++--- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/scripts/config.py b/scripts/config.py index 77498d6c9..8c582a157 100644 --- a/scripts/config.py +++ b/scripts/config.py @@ -64,8 +64,9 @@ class Config(metaclass=Singleton): self.redis_host = os.getenv("REDIS_HOST") self.redis_port = os.getenv("REDIS_PORT") self.redis_password = os.getenv("REDIS_PASSWORD") + self.wipe_redis_on_start = os.getenv("WIPE_REDIS_ON_START", "True") == 'True' # Note that indexes must be created on db 0 in redis, this is not configureable. - + self.memory_backend = os.getenv("MEMORY_BACKEND", 'pinecone') # Initialize the OpenAI API client openai.api_key = self.openai_api_key diff --git a/scripts/memory/redismem.py b/scripts/memory/redismem.py index 162b9269b..e7021066f 100644 --- a/scripts/memory/redismem.py +++ b/scripts/memory/redismem.py @@ -44,7 +44,8 @@ class RedisMemory(MemoryProviderSingleton): password=redis_password, db=0 # Cannot be changed ) - self.redis.flushall() + if cfg.wipe_redis_on_start: + self.redis.flushall() try: self.redis.ft("gpt").create_index( fields=SCHEMA, @@ -55,7 +56,9 @@ class RedisMemory(MemoryProviderSingleton): ) except Exception as e: print("Error creating Redis search index: ", e) - self.vec_num = 0 + existing_vec_num = self.redis.get('vec_num') + self.vec_num = int(existing_vec_num.decode('utf-8')) if\ + existing_vec_num else 0 def add(self, data: str) -> str: """ @@ -72,10 +75,13 @@ class RedisMemory(MemoryProviderSingleton): b"data": data, "embedding": vector } - self.redis.hset(f"gpt:{self.vec_num}", mapping=data_dict) + pipe = self.redis.pipeline() + pipe.hset(f"gpt:{self.vec_num}", mapping=data_dict) _text = f"Inserting data into memory at index: {self.vec_num}:\n"\ f"data: {data}" self.vec_num += 1 + pipe.set('vec_num', self.vec_num) + pipe.execute() return _text def get(self, data: str) -> Optional[List[Any]]: From 43746b1396fe47feae9447a72bbaa15ce2c0960a Mon Sep 17 00:00:00 2001 From: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com> Date: Fri, 7 Apr 2023 00:58:57 -0500 Subject: [PATCH 40/69] Update README with WIPE_REDIS_ON_START setting. 
--- README.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/README.md b/README.md index 921f297ee..7d83b4633 100644 --- a/README.md +++ b/README.md @@ -169,6 +169,13 @@ REDIS_PASSWORD= Note that this is not intended to be run facing the internet and is not secure, do not expose redis to the internet without a password or at all really. +You can optionally set + +``` +WIPE_REDIS_ON_START=False +``` + +To persist memory stored in Redis. ### Setting up environment variables For Windows Users: From 28cc9865e487dec005ef98cffca059cf81c1c806 Mon Sep 17 00:00:00 2001 From: blankey1337 <42594751+blankey1337@users.noreply.github.com> Date: Fri, 7 Apr 2023 08:02:48 -0700 Subject: [PATCH 41/69] feat(ImageGen): add stable diffusion support --- scripts/commands.py | 3 +++ scripts/config.py | 2 ++ scripts/data/prompt.txt | 1 + scripts/image_gen.py | 54 +++++++++++++++-------------------------- 4 files changed, 25 insertions(+), 35 deletions(-) diff --git a/scripts/commands.py b/scripts/commands.py index fc10d1d05..bf8d79833 100644 --- a/scripts/commands.py +++ b/scripts/commands.py @@ -9,6 +9,7 @@ import ai_functions as ai from file_operations import read_file, write_to_file, append_to_file, delete_file, search_files from execute_code import execute_python_file from json_parser import fix_and_parse_json +from image_gen import generate_image from duckduckgo_search import ddg from googleapiclient.discovery import build from googleapiclient.errors import HttpError @@ -102,6 +103,8 @@ def execute_command(command_name, arguments): return ai.write_tests(arguments["code"], arguments.get("focus")) elif command_name == "execute_python_file": # Add this command return execute_python_file(arguments["file"]) + elif command_name == "generate_image": # Add this command + return generate_image(arguments["prompt"]) elif command_name == "task_complete": shutdown() else: diff --git a/scripts/config.py b/scripts/config.py index fe48d2980..2eca16751 100644 --- a/scripts/config.py +++ b/scripts/config.py @@ -53,6 +53,8 @@ class Config(metaclass=Singleton): self.pinecone_api_key = os.getenv("PINECONE_API_KEY") self.pinecone_region = os.getenv("PINECONE_ENV") + self.huggingface_api_token = os.getenv("HUGGINGFACE_API_TOKEN") + # User agent headers to use when browsing web # Some websites might just completely deny request with an error code if no user agent was found. self.user_agent_header = {"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"} diff --git a/scripts/data/prompt.txt b/scripts/data/prompt.txt index 28797d9e2..363342c07 100644 --- a/scripts/data/prompt.txt +++ b/scripts/data/prompt.txt @@ -23,6 +23,7 @@ COMMANDS: 17. Write Tests: "write_tests", args: "code": "", "focus": "" 18. Execute Python File: "execute_python_file", args: "file": "" 19. Task Complete (Shutdown): "task_complete", args: "reason": "" +20. 
Generate Image: "generate_image", args: "prompt": "" RESOURCES: diff --git a/scripts/image_gen.py b/scripts/image_gen.py index cdc4fc4d7..bb3e7686e 100644 --- a/scripts/image_gen.py +++ b/scripts/image_gen.py @@ -1,44 +1,28 @@ -from kandinsky2 import get_kandinsky2 +import requests +import io +import os.path +from PIL import Image from config import Config +import uuid cfg = Config() +working_directory = "auto_gpt_workspace" + +API_URL = "https://api-inference.huggingface.co/models/CompVis/stable-diffusion-v1-4" +headers = {"Authorization": "Bearer " + cfg.huggingface_api_token} + def generate_image(prompt): - - model = get_kandinsky2('cuda', task_type='text2img', model_version='2.1', use_flash_attention=False) - images = model.generate_text2img( - "red cat, 4k photo", # prompt - num_steps=100, - batch_size=1, - guidance_scale=4, - h=768, w=768, - sampler='p_sampler', - prior_cf_scale=4, - prior_steps="5" - ) - return images - - # base_url = 'http://export.arxiv.org/api/query?' - # query = f'search_query=all:{search_query}&start=0&max_results={max_results}' - # url = base_url + query - # response = requests.get(url) + response = requests.post(API_URL, headers=headers, json={ + "inputs": prompt, + }) + image = Image.open(io.BytesIO(response.content)) + print("Image Generated for prompt:" + prompt) - # if response.status_code == 200: - # soup = BeautifulSoup(response.content, 'xml') - # entries = soup.find_all('entry') + filename = str(uuid.uuid4()) + ".jpg" - # articles = [] - # for entry in entries: - # title = entry.title.text.strip() - # url = entry.id.text.strip() - # published = entry.published.text.strip() + image.save(os.path.join(working_directory, filename)) - # articles.append({ - # 'title': title, - # 'url': url, - # 'published': published - # }) + print("Saved to disk:" + filename) - # return articles - # else: - # return None + return str("Image " + filename + " saved to disk for prompt: " + prompt) From b56b04e86f4bcd93297cbe48efb8d9117be2566e Mon Sep 17 00:00:00 2001 From: blankey1337 <42594751+blankey1337@users.noreply.github.com> Date: Fri, 7 Apr 2023 11:03:23 -0700 Subject: [PATCH 42/69] feat(ImageGen): add DALL-E support --- scripts/image_gen.py | 28 +++++++++++++++++++++++++--- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/scripts/image_gen.py b/scripts/image_gen.py index bb3e7686e..92cda2908 100644 --- a/scripts/image_gen.py +++ b/scripts/image_gen.py @@ -4,6 +4,8 @@ import os.path from PIL import Image from config import Config import uuid +import openai +from base64 import b64decode cfg = Config() @@ -13,16 +15,36 @@ API_URL = "https://api-inference.huggingface.co/models/CompVis/stable-diffusion- headers = {"Authorization": "Bearer " + cfg.huggingface_api_token} def generate_image(prompt): + + filename = str(uuid.uuid4()) + ".jpg" + + # DALL-E + openai.api_key = cfg.openai_api_key + + response = openai.Image.create( + prompt=prompt, + n=1, + size="256x256", + response_format="b64_json", + ) + + print("Image Generated for prompt:" + prompt) + print(response["data"][0]["b64_json"][:50]) + + image_data = b64decode(response["data"][0]["b64_json"]) + with open(working_directory + "/" + filename, mode="wb") as png: + png.write(image_data) + + return "Saved to disk:" + filename + + # STABLE DIFFUSION response = requests.post(API_URL, headers=headers, json={ "inputs": prompt, }) image = Image.open(io.BytesIO(response.content)) print("Image Generated for prompt:" + prompt) - filename = str(uuid.uuid4()) + ".jpg" - image.save(os.path.join(working_directory, 
filename)) - print("Saved to disk:" + filename) return str("Image " + filename + " saved to disk for prompt: " + prompt) From f3e64ec4e9128d4757bf5ffadbb73a9b144b2ecb Mon Sep 17 00:00:00 2001 From: blankey1337 <42594751+blankey1337@users.noreply.github.com> Date: Fri, 7 Apr 2023 11:29:43 -0700 Subject: [PATCH 43/69] feat(ImageGen): support env vars, update readme --- .env.template | 4 ++- README.md | 11 +++++++++ scripts/image_gen.py | 59 +++++++++++++++++++++++++------------------- 3 files changed, 48 insertions(+), 26 deletions(-) diff --git a/.env.template b/.env.template index e9ccda5ed..525cd61c5 100644 --- a/.env.template +++ b/.env.template @@ -9,4 +9,6 @@ CUSTOM_SEARCH_ENGINE_ID= USE_AZURE=False OPENAI_API_BASE=your-base-url-for-azure OPENAI_API_VERSION=api-version-for-azure -OPENAI_DEPLOYMENT_ID=deployment-id-for-azure \ No newline at end of file +OPENAI_DEPLOYMENT_ID=deployment-id-for-azure +IMAGE_PROVIDER=dalle +HUGGINGFACE_API_TOKEN= \ No newline at end of file diff --git a/README.md b/README.md index a89c5d03b..f6cf6093e 100644 --- a/README.md +++ b/README.md @@ -43,6 +43,7 @@ Your support is greatly appreciated - [Setting up environment variables](#setting-up-environment-variables) - [πŸ’€ Continuous Mode ⚠️](#-continuous-mode-️) - [GPT3.5 ONLY Mode](#gpt35-only-mode) + - [πŸ–Ό Image Generation](#image-generation) - [⚠️ Limitations](#️-limitations) - [πŸ›‘ Disclaimer](#-disclaimer) - [🐦 Connect with Us on Twitter](#-connect-with-us-on-twitter) @@ -169,6 +170,7 @@ Or you can set them in the `.env` file. 1. View memory usage by using the `--debug` flag :) + ## πŸ’€ Continuous Mode ⚠️ Run the AI **without** user authorisation, 100% automated. Continuous mode is not recommended. @@ -187,6 +189,15 @@ If you don't have access to the GPT4 api, this mode will allow you to use Auto-G python scripts/main.py --gpt3only ``` +## πŸ–Ό Image Generation +By default, Auto-GPT uses DALL-e for image generation. To use Stable Diffusion, a [HuggingFace API Token](https://huggingface.co/settings/tokens) is required. 
+ +Once you have a token, set these variables in your `.env`: +``` +IMAGE_PROVIDER=sd +HUGGINGFACE_API_TOKEN="YOUR_HUGGINGFACE_API_TOKEN" +``` + ## ⚠️ Limitations This experiment aims to showcase the potential of GPT-4 but comes with some limitations: diff --git a/scripts/image_gen.py b/scripts/image_gen.py index 92cda2908..deda7ed5a 100644 --- a/scripts/image_gen.py +++ b/scripts/image_gen.py @@ -11,40 +11,49 @@ cfg = Config() working_directory = "auto_gpt_workspace" -API_URL = "https://api-inference.huggingface.co/models/CompVis/stable-diffusion-v1-4" -headers = {"Authorization": "Bearer " + cfg.huggingface_api_token} - def generate_image(prompt): filename = str(uuid.uuid4()) + ".jpg" - + # DALL-E - openai.api_key = cfg.openai_api_key + if cfg.image_provider == 'dalle': + + openai.api_key = cfg.openai_api_key - response = openai.Image.create( - prompt=prompt, - n=1, - size="256x256", - response_format="b64_json", - ) + response = openai.Image.create( + prompt=prompt, + n=1, + size="256x256", + response_format="b64_json", + ) - print("Image Generated for prompt:" + prompt) - print(response["data"][0]["b64_json"][:50]) + print("Image Generated for prompt:" + prompt) + print(response["data"][0]["b64_json"][:50]) - image_data = b64decode(response["data"][0]["b64_json"]) - with open(working_directory + "/" + filename, mode="wb") as png: - png.write(image_data) + image_data = b64decode(response["data"][0]["b64_json"]) - return "Saved to disk:" + filename + with open(working_directory + "/" + filename, mode="wb") as png: + png.write(image_data) + + return "Saved to disk:" + filename # STABLE DIFFUSION - response = requests.post(API_URL, headers=headers, json={ - "inputs": prompt, - }) - image = Image.open(io.BytesIO(response.content)) - print("Image Generated for prompt:" + prompt) + elif cfg.image_provider == 'sd': - image.save(os.path.join(working_directory, filename)) - print("Saved to disk:" + filename) + API_URL = "https://api-inference.huggingface.co/models/CompVis/stable-diffusion-v1-4" + headers = {"Authorization": "Bearer " + cfg.huggingface_api_token} - return str("Image " + filename + " saved to disk for prompt: " + prompt) + response = requests.post(API_URL, headers=headers, json={ + "inputs": prompt, + }) + + image = Image.open(io.BytesIO(response.content)) + print("Image Generated for prompt:" + prompt) + + image.save(os.path.join(working_directory, filename)) + print("Saved to disk:" + filename) + + return str("Image " + filename + " saved to disk for prompt: " + prompt) + + else: + return "No Image Provider Set" \ No newline at end of file From 091db1d4c3db6a3bf4bc50e882f299719bc65c60 Mon Sep 17 00:00:00 2001 From: blankey1337 <42594751+blankey1337@users.noreply.github.com> Date: Fri, 7 Apr 2023 12:47:49 -0700 Subject: [PATCH 44/69] chore(ImageGen): cleanup --- scripts/config.py | 1 + scripts/image_gen.py | 6 ++---- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/scripts/config.py b/scripts/config.py index 2eca16751..959c3eb22 100644 --- a/scripts/config.py +++ b/scripts/config.py @@ -53,6 +53,7 @@ class Config(metaclass=Singleton): self.pinecone_api_key = os.getenv("PINECONE_API_KEY") self.pinecone_region = os.getenv("PINECONE_ENV") + self.image_provider = os.getenv("IMAGE_PROVIDER") self.huggingface_api_token = os.getenv("HUGGINGFACE_API_TOKEN") # User agent headers to use when browsing web diff --git a/scripts/image_gen.py b/scripts/image_gen.py index deda7ed5a..185ed4278 100644 --- a/scripts/image_gen.py +++ b/scripts/image_gen.py @@ -17,7 +17,7 @@ def 
generate_image(prompt): # DALL-E if cfg.image_provider == 'dalle': - + openai.api_key = cfg.openai_api_key response = openai.Image.create( @@ -28,7 +28,6 @@ def generate_image(prompt): ) print("Image Generated for prompt:" + prompt) - print(response["data"][0]["b64_json"][:50]) image_data = b64decode(response["data"][0]["b64_json"]) @@ -51,9 +50,8 @@ def generate_image(prompt): print("Image Generated for prompt:" + prompt) image.save(os.path.join(working_directory, filename)) - print("Saved to disk:" + filename) - return str("Image " + filename + " saved to disk for prompt: " + prompt) + return "Saved to disk:" + filename else: return "No Image Provider Set" \ No newline at end of file From f0162037c341e31583d09626da2c853563cc4776 Mon Sep 17 00:00:00 2001 From: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com> Date: Fri, 7 Apr 2023 15:02:22 -0500 Subject: [PATCH 45/69] Fix README --- README.md | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index 7d83b4633..b7f514e83 100644 --- a/README.md +++ b/README.md @@ -140,16 +140,6 @@ export CUSTOM_SEARCH_ENGINE_ID="YOUR_CUSTOM_SEARCH_ENGINE_ID" ``` -## 🌲 Pinecone API Key Setup - -Pinecone enable a vector based memory so a vast memory can be stored and only relevant memories -are loaded for the agent at any given time. - -1. Go to app.pinecone.io and make an account if you don't already have one. -2. Choose the `Starter` plan to avoid being charged. -3. Find your API key and region under the default project in the left sidebar. - - ## Redis Setup Install docker desktop. @@ -177,6 +167,15 @@ WIPE_REDIS_ON_START=False To persist memory stored in Redis. +## 🌲 Pinecone API Key Setup + +Pinecone enable a vector based memory so a vast memory can be stored and only relevant memories +are loaded for the agent at any given time. + +1. Go to app.pinecone.io and make an account if you don't already have one. +2. Choose the `Starter` plan to avoid being charged. +3. Find your API key and region under the default project in the left sidebar. + ### Setting up environment variables For Windows Users: ``` From 5d13fb2546916f2b5ff360720b07706ab31e6e21 Mon Sep 17 00:00:00 2001 From: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com> Date: Fri, 7 Apr 2023 15:03:20 -0500 Subject: [PATCH 46/69] Remove unused function. --- scripts/memory/base.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/scripts/memory/base.py b/scripts/memory/base.py index 72349f6be..d7ab7fcf1 100644 --- a/scripts/memory/base.py +++ b/scripts/memory/base.py @@ -9,10 +9,6 @@ def get_ada_embedding(text): return openai.Embedding.create(input=[text], model="text-embedding-ada-002")["data"][0]["embedding"] -def get_text_from_embedding(embedding): - return openai.Embedding.retrieve(embedding, model="text-embedding-ada-002")["data"][0]["text"] - - class MemoryProviderSingleton(AbstractSingleton): @abc.abstractmethod def add(self, data): From 14e10c9c4ddc1d0736b4161e96d0c2517c65b12a Mon Sep 17 00:00:00 2001 From: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com> Date: Fri, 7 Apr 2023 15:27:48 -0500 Subject: [PATCH 47/69] Add configurable index key for redis. 
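Assuming `MEMORY_INDEX=auto-gpt` (the value itself is arbitrary), the resulting key scheme sketched in Python:

```python
index = "auto-gpt"                # from the MEMORY_INDEX environment variable
search_index = index              # RediSearch index name
hash_key = f"{index}:{0}"         # one hash per stored memory: "auto-gpt:0"
counter_key = f"{index}-vec_num"  # persisted insert counter: "auto-gpt-vec_num"
```

This lets several agents share one Redis instance without clobbering each other's memories.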
--- scripts/config.py | 1 + scripts/memory/redismem.py | 15 ++++++++------- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/scripts/config.py b/scripts/config.py index 8c582a157..637c17fdf 100644 --- a/scripts/config.py +++ b/scripts/config.py @@ -65,6 +65,7 @@ class Config(metaclass=Singleton): self.redis_port = os.getenv("REDIS_PORT") self.redis_password = os.getenv("REDIS_PASSWORD") self.wipe_redis_on_start = os.getenv("WIPE_REDIS_ON_START", "True") == 'True' + self.memory_index = os.getenv("MEMORY_INDEX", 'gpt') # Note that indexes must be created on db 0 in redis, this is not configureable. self.memory_backend = os.getenv("MEMORY_BACKEND", 'pinecone') diff --git a/scripts/memory/redismem.py b/scripts/memory/redismem.py index e7021066f..20be4a4e8 100644 --- a/scripts/memory/redismem.py +++ b/scripts/memory/redismem.py @@ -44,19 +44,20 @@ class RedisMemory(MemoryProviderSingleton): password=redis_password, db=0 # Cannot be changed ) + self.cfg = cfg if cfg.wipe_redis_on_start: self.redis.flushall() try: - self.redis.ft("gpt").create_index( + self.redis.ft(f"{cfg.memory_index}").create_index( fields=SCHEMA, definition=IndexDefinition( - prefix=["gpt:"], + prefix=[f"{cfg.memory_index}:"], index_type=IndexType.HASH ) ) except Exception as e: print("Error creating Redis search index: ", e) - existing_vec_num = self.redis.get('vec_num') + existing_vec_num = self.redis.get(f'{cfg.memory_index}-vec_num') self.vec_num = int(existing_vec_num.decode('utf-8')) if\ existing_vec_num else 0 @@ -76,11 +77,11 @@ class RedisMemory(MemoryProviderSingleton): "embedding": vector } pipe = self.redis.pipeline() - pipe.hset(f"gpt:{self.vec_num}", mapping=data_dict) + pipe.hset(f"{self.cfg.memory_index}:{self.vec_num}", mapping=data_dict) _text = f"Inserting data into memory at index: {self.vec_num}:\n"\ f"data: {data}" self.vec_num += 1 - pipe.set('vec_num', self.vec_num) + pipe.set(f'{self.cfg.memory_index}-vec_num', self.vec_num) pipe.execute() return _text @@ -126,7 +127,7 @@ class RedisMemory(MemoryProviderSingleton): query_vector = np.array(query_embedding).astype(np.float32).tobytes() try: - results = self.redis.ft("gpt").search( + results = self.redis.ft(f"{self.cfg.memory_index}").search( query, query_params={"vector": query_vector} ) except Exception as e: @@ -138,4 +139,4 @@ class RedisMemory(MemoryProviderSingleton): """ Returns: The stats of the memory index. """ - return self.redis.ft("mem").info() + return self.redis.ft(f"{self.cfg.memory_index}").info() From ea6b97050948487cee5ee50a12f7eb2a161e0648 Mon Sep 17 00:00:00 2001 From: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com> Date: Fri, 7 Apr 2023 15:28:48 -0500 Subject: [PATCH 48/69] Update README --- README.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/README.md b/README.md index b7f514e83..5ce001b09 100644 --- a/README.md +++ b/README.md @@ -167,6 +167,12 @@ WIPE_REDIS_ON_START=False To persist memory stored in Redis. +You can specify the memory index for redis using the following: + +```` +MEMORY_INDEX=whatever +```` + ## 🌲 Pinecone API Key Setup Pinecone enable a vector based memory so a vast memory can be stored and only relevant memories From cb14c8d999c32c89215be04d27fe132a149eb047 Mon Sep 17 00:00:00 2001 From: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com> Date: Fri, 7 Apr 2023 18:13:18 -0500 Subject: [PATCH 49/69] Implement local memory. 
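The retrieval in `get_relevant` below is a plain matrix-vector product over the stored ada embeddings; a toy illustration with made-up 2-d vectors (real embeddings are 1536-d):

```python
import numpy as np

embeddings = np.array([[0.1, 0.9],
                       [0.8, 0.2]], dtype=np.float32)  # one row per stored text
query = np.array([0.9, 0.1], dtype=np.float32)         # embedded query

scores = np.dot(embeddings, query)     # one dot-product score per stored row
top_k = np.argsort(scores)[-1:][::-1]  # indices of the k best scores (k=1 here)
print(top_k)                           # [1] -- the second row wins (score ~0.74)
```

Since ada embeddings are approximately unit-norm, the raw dot product behaves like cosine similarity here.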
--- requirements.txt | 3 +- scripts/commands.py | 8 ++- scripts/config.py | 4 +- scripts/main.py | 5 +- scripts/memory/local.py | 111 +++++++++++++++++++++++++++++++++++++ scripts/memory/redismem.py | 1 - 6 files changed, 125 insertions(+), 7 deletions(-) create mode 100644 scripts/memory/local.py diff --git a/requirements.txt b/requirements.txt index 9cfddad62..5bcc74957 100644 --- a/requirements.txt +++ b/requirements.txt @@ -12,4 +12,5 @@ docker duckduckgo-search google-api-python-client #(https://developers.google.com/custom-search/v1/overview) pinecone-client==2.2.1 -redis \ No newline at end of file +redis +orjson \ No newline at end of file diff --git a/scripts/commands.py b/scripts/commands.py index 98be77727..a88ad0ae0 100644 --- a/scripts/commands.py +++ b/scripts/commands.py @@ -1,5 +1,6 @@ import browse import json +from memory.local import LocalCache from memory.pinecone import PineconeMemory from memory.redismem import RedisMemory import datetime @@ -55,11 +56,14 @@ def get_command(response): def execute_command(command_name, arguments): if cfg.memory_backend == "pinecone": memory = PineconeMemory(cfg=cfg) - else: + elif cfg.memory_backend == "redis": memory = RedisMemory(cfg=cfg) + else: + memory = LocalCache(cfg=cfg) + try: if command_name == "google": - + # Check if the Google API key is set and use the official search method # If the API key is not set or has only whitespaces, use the unofficial search method if cfg.google_api_key and (cfg.google_api_key.strip() if cfg.google_api_key else None): diff --git a/scripts/config.py b/scripts/config.py index 637c17fdf..9afeb1d25 100644 --- a/scripts/config.py +++ b/scripts/config.py @@ -65,10 +65,10 @@ class Config(metaclass=Singleton): self.redis_port = os.getenv("REDIS_PORT") self.redis_password = os.getenv("REDIS_PASSWORD") self.wipe_redis_on_start = os.getenv("WIPE_REDIS_ON_START", "True") == 'True' - self.memory_index = os.getenv("MEMORY_INDEX", 'gpt') + self.memory_index = os.getenv("MEMORY_INDEX", 'auto-gpt') # Note that indexes must be created on db 0 in redis, this is not configureable. 
- self.memory_backend = os.getenv("MEMORY_BACKEND", 'pinecone') + self.memory_backend = os.getenv("MEMORY_BACKEND", 'local') # Initialize the OpenAI API client openai.api_key = self.openai_api_key diff --git a/scripts/main.py b/scripts/main.py index eecdd7f80..e49f1810d 100644 --- a/scripts/main.py +++ b/scripts/main.py @@ -1,6 +1,7 @@ import json import random import commands as cmd +from memory.local import LocalCache from memory.pinecone import PineconeMemory from memory.redismem import RedisMemory import data @@ -287,8 +288,10 @@ user_input = "Determine which next command to use, and respond using the format if cfg.memory_backend == "pinecone": memory = PineconeMemory(cfg) memory.clear() -else: +elif cfg.memory_backend == "redis": memory = RedisMemory(cfg) +else: + memory = LocalCache(cfg) print('Using memory of type: ' + memory.__class__.__name__) diff --git a/scripts/memory/local.py b/scripts/memory/local.py new file mode 100644 index 000000000..fb1052242 --- /dev/null +++ b/scripts/memory/local.py @@ -0,0 +1,111 @@ +import dataclasses +import orjson +from typing import Any, List, Optional +import numpy as np +import os +from memory.base import MemoryProviderSingleton, get_ada_embedding + + +EMBED_DIM = 1536 +SAVE_OPTIONS = orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_SERIALIZE_DATACLASS + + +def create_default_embeddings(): + return np.zeros((0, EMBED_DIM)).astype(np.float32) + + +@dataclasses.dataclass +class CacheContent: + texts: List[str] = dataclasses.field(default_factory=list) + embeddings: np.ndarray = dataclasses.field( + default_factory=create_default_embeddings + ) + + +class LocalCache(MemoryProviderSingleton): + + # on load, load our database + def __init__(self, cfg) -> None: + self.filename = f"{cfg.memory_index}.json" + if os.path.exists(self.filename): + with open(self.filename, 'rb') as f: + loaded = orjson.loads(f.read()) + self.data = CacheContent(**loaded) + else: + self.data = CacheContent() + + def add(self, text: str): + """ + Add text to our list of texts, add embedding as row to our + embeddings-matrix + + Args: + text: str + + Returns: None + """ + self.data.texts.append(text) + + embedding = get_ada_embedding(text) + + vector = np.array(embedding).astype(np.float32) + vector = vector[np.newaxis, :] + self.data.embeddings = np.concatenate( + [ + vector, + self.data.embeddings, + ], + axis=0, + ) + + with open(self.filename, 'wb') as f: + out = orjson.dumps( + self.data, + option=SAVE_OPTIONS + ) + f.write(out) + + def clear(self) -> str: + """ + Clears the redis server. + + Returns: A message indicating that the memory has been cleared. + """ + self.data = CacheContent() + return "Obliviated" + + def get(self, data: str) -> Optional[List[Any]]: + """ + Gets the data from the memory that is most relevant to the given data. + + Args: + data: The data to compare to. + + Returns: The most relevant data. + """ + return self.get_relevant(data, 1) + + def get_relevant(self, text: str, k: int) -> List[Any]: + """" + matrix-vector mult to find score-for-each-row-of-matrix + get indices for top-k winning scores + return texts for those indices + Args: + text: str + k: int + + Returns: List[str] + """ + embedding = get_ada_embedding(text) + + scores = np.dot(self.data.embeddings, embedding) + + top_k_indices = np.argsort(scores)[-k:][::-1] + + return [self.data.texts[i] for i in top_k_indices] + + def get_stats(self): + """ + Returns: The stats of the local cache. 
+ """ + return len(self.data.texts), self.data.embeddings.shape diff --git a/scripts/memory/redismem.py b/scripts/memory/redismem.py index 20be4a4e8..296d0cce2 100644 --- a/scripts/memory/redismem.py +++ b/scripts/memory/redismem.py @@ -4,7 +4,6 @@ import redis from redis.commands.search.field import VectorField, TextField from redis.commands.search.query import Query from redis.commands.search.indexDefinition import IndexDefinition, IndexType -import traceback import numpy as np from memory.base import MemoryProviderSingleton, get_ada_embedding From 503b58b7948fe3a37622919864015607352e76e6 Mon Sep 17 00:00:00 2001 From: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com> Date: Fri, 7 Apr 2023 18:30:04 -0500 Subject: [PATCH 50/69] Refactor memory into factory. --- scripts/commands.py | 11 ++-------- scripts/main.py | 12 ++--------- scripts/memory/__init__.py | 42 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 46 insertions(+), 19 deletions(-) diff --git a/scripts/commands.py b/scripts/commands.py index a88ad0ae0..783e6bd29 100644 --- a/scripts/commands.py +++ b/scripts/commands.py @@ -1,8 +1,6 @@ import browse import json -from memory.local import LocalCache -from memory.pinecone import PineconeMemory -from memory.redismem import RedisMemory +from memory import get_memory import datetime import agent_manager as agents import speak @@ -54,12 +52,7 @@ def get_command(response): def execute_command(command_name, arguments): - if cfg.memory_backend == "pinecone": - memory = PineconeMemory(cfg=cfg) - elif cfg.memory_backend == "redis": - memory = RedisMemory(cfg=cfg) - else: - memory = LocalCache(cfg=cfg) + memory = get_memory(cfg) try: if command_name == "google": diff --git a/scripts/main.py b/scripts/main.py index e49f1810d..11bf0dc1b 100644 --- a/scripts/main.py +++ b/scripts/main.py @@ -1,9 +1,7 @@ import json import random import commands as cmd -from memory.local import LocalCache -from memory.pinecone import PineconeMemory -from memory.redismem import RedisMemory +from memory import get_memory import data import chat from colorama import Fore, Style @@ -285,13 +283,7 @@ user_input = "Determine which next command to use, and respond using the format # Initialize memory and make sure it is empty. # this is particularly important for indexing and referencing pinecone memory -if cfg.memory_backend == "pinecone": - memory = PineconeMemory(cfg) - memory.clear() -elif cfg.memory_backend == "redis": - memory = RedisMemory(cfg) -else: - memory = LocalCache(cfg) +memory = get_memory(cfg, init=True) print('Using memory of type: ' + memory.__class__.__name__) diff --git a/scripts/memory/__init__.py b/scripts/memory/__init__.py index e69de29bb..dacb05b32 100644 --- a/scripts/memory/__init__.py +++ b/scripts/memory/__init__.py @@ -0,0 +1,42 @@ +from memory.local import LocalCache +try: + from memory.redismem import RedisMemory +except ImportError: + print("Redis not installed. Skipping import.") + RedisMemory = None + +try: + from memory.pinecone import PineconeMemory +except ImportError: + print("Pinecone not installed. Skipping import.") + PineconeMemory = None + + +def get_memory(cfg, init=False): + memory = None + if cfg.memory_backend == "pinecone": + if not PineconeMemory: + print("Error: Pinecone is not installed. Please install pinecone" + " to use Pinecone as a memory backend.") + else: + memory = PineconeMemory(cfg) + if init: + memory.clear() + elif cfg.memory_backend == "redis": + if not RedisMemory: + print("Error: Redis is not installed. 
Please install redis-py to" + " use Redis as a memory backend.") + else: + memory = RedisMemory(cfg) + + if memory is None: + memory = LocalCache(cfg) + return memory + + +__all__ = [ + "get_memory", + "LocalCache", + "RedisCache", + "PineconeCache", +] From b983faa95339843f170258bd06923f6b81c1dc42 Mon Sep 17 00:00:00 2001 From: keenborder786 <21110290@lums.edu.pk> Date: Sat, 8 Apr 2023 04:36:10 +0500 Subject: [PATCH 51/69] [fix]: Added an exception in main.py if pinecone_api_key is not provided --- scripts/main.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/main.py b/scripts/main.py index a79fd553c..17385bf33 100644 --- a/scripts/main.py +++ b/scripts/main.py @@ -281,11 +281,12 @@ next_action_count = 0 # Make a constant: user_input = "Determine which next command to use, and respond using the format specified above:" +# raise an exception if pinecone_api_key or region is not provided +if not cfg.pinecone_api_key or not cfg.pinecone_region: raise Exception("Please provide pinecone_api_key and pinecone_region") # Initialize memory and make sure it is empty. # this is particularly important for indexing and referencing pinecone memory memory = PineconeMemory() memory.clear() - print('Using memory of type: ' + memory.__class__.__name__) # Interaction Loop From 1d0848c1a89e6c29e5fde1688bcdfa267de07974 Mon Sep 17 00:00:00 2001 From: keenborder786 <21110290@lums.edu.pk> Date: Sat, 8 Apr 2023 04:37:46 +0500 Subject: [PATCH 52/69] [docs]: Updated ReadMe to update the requirements sections --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index a89c5d03b..4fc0c349e 100644 --- a/README.md +++ b/README.md @@ -59,6 +59,7 @@ Your support is greatly appreciated ## πŸ“‹ Requirements - [Python 3.7 or later](https://www.tutorialspoint.com/how-to-install-python-in-windows) - OpenAI API key +- PINECONE API key Optional: - ElevenLabs Key (If you want the AI to speak) From a34c51bf8622cf83a34493718c8be60c0676e603 Mon Sep 17 00:00:00 2001 From: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com> Date: Fri, 7 Apr 2023 20:58:00 -0500 Subject: [PATCH 53/69] Update scripts/config.py MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Jason KΓΆlker --- scripts/config.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/config.py b/scripts/config.py index 9afeb1d25..1601dcc43 100644 --- a/scripts/config.py +++ b/scripts/config.py @@ -61,9 +61,9 @@ class Config(metaclass=Singleton): # User agent headers to use when browsing web # Some websites might just completely deny request with an error code if no user agent was found. self.user_agent_header = {"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"} - self.redis_host = os.getenv("REDIS_HOST") - self.redis_port = os.getenv("REDIS_PORT") - self.redis_password = os.getenv("REDIS_PASSWORD") + self.redis_host = os.getenv("REDIS_HOST", "localhost") + self.redis_port = os.getenv("REDIS_PORT", "6379") + self.redis_password = os.getenv("REDIS_PASSWORD", "") self.wipe_redis_on_start = os.getenv("WIPE_REDIS_ON_START", "True") == 'True' self.memory_index = os.getenv("MEMORY_INDEX", 'auto-gpt') # Note that indexes must be created on db 0 in redis, this is not configureable. 
From 9328c8f7b5b9b7eb76dd131f36ad6109e8b28e32 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=A1bio=20Machado=20de=20Oliveira?= Date: Sat, 8 Apr 2023 00:15:14 -0300 Subject: [PATCH 54/69] Settings were being saved and loaded in the wrong directory --- scripts/ai_config.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/ai_config.py b/scripts/ai_config.py index 2f4327486..8cfa183a9 100644 --- a/scripts/ai_config.py +++ b/scripts/ai_config.py @@ -1,6 +1,6 @@ import yaml import data - +import os class AIConfig: def __init__(self, ai_name="", ai_role="", ai_goals=[]): @@ -9,7 +9,7 @@ class AIConfig: self.ai_goals = ai_goals # Soon this will go in a folder where it remembers more stuff about the run(s) - SAVE_FILE = "../ai_settings.yaml" + SAVE_FILE = os.path.join(os.path.dirname(__file__), '..', 'ai_settings.yaml') @classmethod def load(cls, config_file=SAVE_FILE): From cc05139843853ab2ac091c38253a810e418e675c Mon Sep 17 00:00:00 2001 From: Toran Bruce Richards Date: Sat, 8 Apr 2023 05:24:53 +0100 Subject: [PATCH 55/69] Update README.md --- README.md | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 4fc0c349e..6e92e6b95 100644 --- a/README.md +++ b/README.md @@ -9,7 +9,7 @@ Auto-GPT is an experimental open-source application showcasing the capabilities https://user-images.githubusercontent.com/22963551/228855501-2f5777cf-755b-4407-a643-c7299e5b6419.mp4 -## πŸ’– Help Fund Auto-GPT's Development +

πŸ’– Help Fund Auto-GPT's Development πŸ’–

If you can spare a coffee, you can help to cover the API costs of developing Auto-GPT and help push the boundaries of fully autonomous AI! A full day of development can easily cost as much as $20 in API costs, which for a free project is quite limiting. @@ -17,14 +17,13 @@ Your support is greatly appreciated

- Development of this free, open-source project is made possible by all the contributors and sponsors. If you'd like to sponsor this project and have your avatar or company logo appear below click here. πŸ’– -

-

-thepok  SpacingLily  m  zkonduit  maxxflyer  tekelsey  nocodeclarity  tjarmain  alexisneuhaus  jaumebalust  robinicus  digisomni   -

+ Development of this free, open-source project is made possible by all the contributors and sponsors. If you'd like to sponsor this project and have your avatar or company logo appear below click here. +

Individual Sponsors

-alexisneuhaus  iokode  jaumebalust  nova-land  robinicus  Void-n-Null  ritesh24  merwanehamadi  raulmarindev  siduppal  goosecubedaddy  pleabargain   +robinicus  prompthero  crizzler  tob-le-rone  FSTatSBS  toverly1  ddtarazona  Nalhos  Kazamario  pingbotan  indoor47  AuroraHolding  kreativai  hunteraraujo  Explorergt92  judegomila   +thepok +  SpacingLily  merwanehamadi  m  zkonduit  maxxflyer  tekelsey  digisomni  nocodeclarity  tjarmain

From 8b36a5cfd33e38641d594bba10c6fe7356438938 Mon Sep 17 00:00:00 2001 From: Toran Bruce Richards Date: Sat, 8 Apr 2023 12:27:05 +0100 Subject: [PATCH 56/69] Removes comment --- scripts/commands.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/commands.py b/scripts/commands.py index bf8d79833..a45fb8963 100644 --- a/scripts/commands.py +++ b/scripts/commands.py @@ -103,7 +103,7 @@ def execute_command(command_name, arguments): return ai.write_tests(arguments["code"], arguments.get("focus")) elif command_name == "execute_python_file": # Add this command return execute_python_file(arguments["file"]) - elif command_name == "generate_image": # Add this command + elif command_name == "generate_image": return generate_image(arguments["prompt"]) elif command_name == "task_complete": shutdown() From 85d0d27045c2f426fc9618fa8a96b7c89fbdf82a Mon Sep 17 00:00:00 2001 From: "Jonathan S. Rouach" Date: Sat, 8 Apr 2023 15:20:10 +0300 Subject: [PATCH 57/69] fix: add Pillow dependency --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index ce2470985..7b1040401 100644 --- a/requirements.txt +++ b/requirements.txt @@ -12,3 +12,4 @@ docker duckduckgo-search google-api-python-client #(https://developers.google.com/custom-search/v1/overview) pinecone-client==2.2.1 +Pillow From 3f66a6a0a307d015f166e213d56698bfa92cefcc Mon Sep 17 00:00:00 2001 From: Bill Morgan Date: Fri, 7 Apr 2023 15:36:03 -0500 Subject: [PATCH 58/69] fix typo in prompt.txt --- scripts/data/prompt.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/data/prompt.txt b/scripts/data/prompt.txt index 363342c07..77a449de5 100644 --- a/scripts/data/prompt.txt +++ b/scripts/data/prompt.txt @@ -18,7 +18,7 @@ COMMANDS: 12. Append to file: "append_to_file", args: "file": "", "text": "" 13. Delete file: "delete_file", args: "file": "" 14. Search Files: "search_files", args: "directory": "" -15. Evaluate Code: "evaluate_code", args: "code": "" +15. Evaluate Code: "evaluate_code", args: "code": "" 16. Get Improved Code: "improve_code", args: "suggestions": "", "code": "" 17. Write Tests: "write_tests", args: "code": "", "focus": "" 18. Execute Python File: "execute_python_file", args: "file": "" From 7cba76228e0db94aa2b4de6e63096c3f7e65fe62 Mon Sep 17 00:00:00 2001 From: Bill Morgan Date: Sat, 8 Apr 2023 08:22:27 -0500 Subject: [PATCH 59/69] fix command error check --- scripts/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/main.py b/scripts/main.py index 17385bf33..d36f979f6 100644 --- a/scripts/main.py +++ b/scripts/main.py @@ -358,7 +358,7 @@ while True: f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}") # Execute command - if command_name.lower() == "error": + if command_name.lower().startswith( "error" ): result = f"Command {command_name} threw the following error: " + arguments elif command_name == "human_feedback": result = f"Human feedback: {user_input}" From d1777e39a8668674d40b06f3e3690e68e5daa27d Mon Sep 17 00:00:00 2001 From: Toran Bruce Richards Date: Sun, 9 Apr 2023 02:31:51 +0100 Subject: [PATCH 60/69] Fixes incorrect class names in __all__ Changes "Cache" to "Memory". 
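Without this fix a wildcard import fails at runtime, because `__all__` advertised names that don't exist in the package:

```python
from memory import *
# AttributeError: module 'memory' has no attribute 'RedisCache'
```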
--- scripts/memory/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/memory/__init__.py b/scripts/memory/__init__.py index dacb05b32..995230794 100644 --- a/scripts/memory/__init__.py +++ b/scripts/memory/__init__.py @@ -37,6 +37,6 @@ def get_memory(cfg, init=False): __all__ = [ "get_memory", "LocalCache", - "RedisCache", - "PineconeCache", + "RedisMemory", + "PineconeMemory", ] From 47c6117e1886d9b69f95807df1e4ee6a2c76eb64 Mon Sep 17 00:00:00 2001 From: Ryan Peach Date: Sat, 8 Apr 2023 22:59:28 -0400 Subject: [PATCH 61/69] Added time and date to the system message for each context --- scripts/chat.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/scripts/chat.py b/scripts/chat.py index 8da074c6b..0b110bbae 100644 --- a/scripts/chat.py +++ b/scripts/chat.py @@ -26,7 +26,10 @@ def create_chat_message(role, content): def generate_context(prompt, relevant_memory, full_message_history, model): current_context = [ create_chat_message( - "system", prompt), create_chat_message( + "system", prompt), + create_chat_message( + "system", f"The current time and date is {time.strftime('%c')}"), + create_chat_message( "system", f"Permanent memory: {relevant_memory}")] # Add messages from the full message history until we reach the token limit @@ -95,7 +98,7 @@ def chat_with_ai( # Count the currently used tokens current_tokens_used += tokens_to_add - + # Move to the next most recent message in the full message history next_message_to_add_index -= 1 From 2db7f0815eed6e96b94423c96a40b95fc47750d4 Mon Sep 17 00:00:00 2001 From: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com> Date: Sat, 8 Apr 2023 22:25:59 -0500 Subject: [PATCH 62/69] Update main.py Remove pinecone config requirement --- scripts/main.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/scripts/main.py b/scripts/main.py index e16fb9d14..10f9d0dca 100644 --- a/scripts/main.py +++ b/scripts/main.py @@ -281,8 +281,6 @@ next_action_count = 0 # Make a constant: user_input = "Determine which next command to use, and respond using the format specified above:" -# raise an exception if pinecone_api_key or region is not provided -if not cfg.pinecone_api_key or not cfg.pinecone_region: raise Exception("Please provide pinecone_api_key and pinecone_region") # Initialize memory and make sure it is empty. # this is particularly important for indexing and referencing pinecone memory memory = get_memory(cfg, init=True) From 9e139fb314b7b5c9b538a85d204ff08ce59e10bd Mon Sep 17 00:00:00 2001 From: Toran Bruce Richards Date: Sun, 9 Apr 2023 05:22:03 +0100 Subject: [PATCH 63/69] Wipe local memory on load --- scripts/memory/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/scripts/memory/__init__.py b/scripts/memory/__init__.py index 995230794..a441a46aa 100644 --- a/scripts/memory/__init__.py +++ b/scripts/memory/__init__.py @@ -31,6 +31,8 @@ def get_memory(cfg, init=False): if memory is None: memory = LocalCache(cfg) + if init: + memory.clear() return memory From a861dec6764254b581d23d4573f1da8307bf533a Mon Sep 17 00:00:00 2001 From: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com> Date: Sat, 8 Apr 2023 23:33:18 -0500 Subject: [PATCH 64/69] Memory fixes. 
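Sketch of the two behavioural changes (illustrative calls, not part of the diff):

```python
mem = LocalCache(cfg)  # assumes a loaded Config instance
mem.add("Command Error: Unknown command 'foo'")   # now returns "" and stores nothing
mem.add("The task list was written to todo.txt")  # embedded, persisted, echoed back

# RedisMemory.get_relevant() now returns the stored strings themselves
# rather than raw redis-py Document objects.
```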
---
 scripts/memory/local.py    | 3 +++
 scripts/memory/redismem.py | 4 +++-
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/scripts/memory/local.py b/scripts/memory/local.py
index fb1052242..8dc90021f 100644
--- a/scripts/memory/local.py
+++ b/scripts/memory/local.py
@@ -44,6 +44,8 @@ class LocalCache(MemoryProviderSingleton):
 
         Returns: None
         """
+        if 'Command Error:' in text:
+            return ""
         self.data.texts.append(text)
 
         embedding = get_ada_embedding(text)
@@ -64,6 +66,7 @@ class LocalCache(MemoryProviderSingleton):
                 option=SAVE_OPTIONS
             )
             f.write(out)
+        return text
 
     def clear(self) -> str:
         """
diff --git a/scripts/memory/redismem.py b/scripts/memory/redismem.py
index 296d0cce2..2082fe588 100644
--- a/scripts/memory/redismem.py
+++ b/scripts/memory/redismem.py
@@ -69,6 +69,8 @@ class RedisMemory(MemoryProviderSingleton):
 
         Returns: Message indicating that the data has been added.
         """
+        if 'Command Error:' in data:
+            return ""
         vector = get_ada_embedding(data)
         vector = np.array(vector).astype(np.float32).tobytes()
         data_dict = {
@@ -132,7 +134,7 @@ class RedisMemory(MemoryProviderSingleton):
         except Exception as e:
             print("Error calling Redis search: ", e)
             return None
-        return list(results.docs)
+        return [result.data for result in results.docs]
 
     def get_stats(self):
         """

From d8410d9ca308d2430a7928b61db002046e293b17 Mon Sep 17 00:00:00 2001
From: Toran Bruce Richards
Date: Sun, 9 Apr 2023 06:16:42 +0100
Subject: [PATCH 65/69] Makes it clearer to the AI exactly what memories are.

---
 scripts/chat.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/chat.py b/scripts/chat.py
index 8da074c6b..5ab52f993 100644
--- a/scripts/chat.py
+++ b/scripts/chat.py
@@ -27,7 +27,7 @@ def generate_context(prompt, relevant_memory, full_message_history, model):
     current_context = [
         create_chat_message(
             "system", prompt), create_chat_message(
-            "system", f"Permanent memory: {relevant_memory}")]
+            "system", f"This reminds you of these events from your past:\n{relevant_memory}\n\n")]
 
     # Add messages from the full message history until we reach the token limit

From a2fe619c7b7352b9aab35bc10b12f94946712282 Mon Sep 17 00:00:00 2001
From: Toran Bruce Richards
Date: Sun, 9 Apr 2023 06:44:10 +0100
Subject: [PATCH 66/69] Improves response to AI that sends wrong output.

---
 scripts/commands.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/commands.py b/scripts/commands.py
index 1f255751c..ba5383957 100644
--- a/scripts/commands.py
+++ b/scripts/commands.py
@@ -109,7 +109,7 @@ def execute_command(command_name, arguments):
         elif command_name == "task_complete":
             shutdown()
         else:
-            return f"Unknown command {command_name}"
+            return f"Unknown command '{command_name}'. Please refer to the 'COMMANDS' list for available commands and only respond in the specified JSON format."
     # All errors, return "Error: + error message"
     except Exception as e:
         return "Error: " + str(e)

From 97711584c3d9cb904a42974f8d3879af9fd9431c Mon Sep 17 00:00:00 2001
From: Richard Beales
Date: Sun, 9 Apr 2023 07:36:00 +0100
Subject: [PATCH 67/69] Update README to indicate Python 3.8 minimum

Due to tiktoken dependency.
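A quick interpreter check (illustrative only):

```python
import sys
assert sys.version_info >= (3, 8), "Auto-GPT needs Python 3.8+ (tiktoken dependency)"
```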
---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 760e62cf5..7154446e2 100644
--- a/README.md
+++ b/README.md
@@ -57,7 +57,7 @@ Your support is greatly appreciated
 - πŸ—ƒοΈ File storage and summarization with GPT-3.5
 
 ## πŸ“‹ Requirements
-- [Python 3.7 or later](https://www.tutorialspoint.com/how-to-install-python-in-windows)
+- [Python 3.8 or later](https://www.tutorialspoint.com/how-to-install-python-in-windows)
 - OpenAI API key
 - PINECONE API key

From 3efdb4896166c38ff70b2f208c4a644774c1c92b Mon Sep 17 00:00:00 2001
From: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com>
Date: Sun, 9 Apr 2023 02:38:06 -0500
Subject: [PATCH 68/69] Update README.md

---
 README.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/README.md b/README.md
index 760e62cf5..61181ebf9 100644
--- a/README.md
+++ b/README.md
@@ -149,6 +149,7 @@ Run:
 ```
 docker run -d --name redis-stack-server -p 6379:6379 redis/redis-stack-server:latest
 ```
+See https://hub.docker.com/r/redis/redis-stack-server for setting a password and additional configuration.
 
 Set the following environment variables:
 ```

From 546d8783e78096d737351fca00d2cd701b9b72e5 Mon Sep 17 00:00:00 2001
From: Alexander Nikulin
Date: Sun, 9 Apr 2023 14:33:30 +0400
Subject: [PATCH 69/69] Put debug setting in cfg and use it when calling
 chat.chat_with_ai and fix_json

---
 scripts/config.py      | 4 ++++
 scripts/json_parser.py | 2 +-
 scripts/main.py        | 6 +++++-
 3 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/scripts/config.py b/scripts/config.py
index d5f1a3f06..4d7adec1c 100644
--- a/scripts/config.py
+++ b/scripts/config.py
@@ -31,6 +31,7 @@ class Config(metaclass=Singleton):
     """
 
     def __init__(self):
+        self.debug = False
         self.continuous_mode = False
         self.speak_mode = False
         # TODO - make these models be self-contained, using langchain, so we can configure them once and call it good
@@ -110,3 +111,6 @@ class Config(metaclass=Singleton):
 
     def set_pinecone_region(self, value: str):
         self.pinecone_region = value
+
+    def set_debug_mode(self, value: bool):
+        self.debug = value
diff --git a/scripts/json_parser.py b/scripts/json_parser.py
index 8ec9238b4..c863ccdbb 100644
--- a/scripts/json_parser.py
+++ b/scripts/json_parser.py
@@ -40,7 +40,7 @@ def fix_and_parse_json(json_str: str, try_to_fix_with_gpt: bool = True):
         if try_to_fix_with_gpt:
             print(f"Warning: Failed to parse AI output, attempting to fix.\n If you see this warning frequently, it's likely that your prompt is confusing the AI. Try changing it up slightly.")
             # Now try to fix this up using the ai_functions
-            ai_fixed_json = fix_json(json_str, json_schema, False)
+            ai_fixed_json = fix_json(json_str, json_schema, cfg.debug)
             if ai_fixed_json != "failed":
                 return json.loads(ai_fixed_json)
             else:
diff --git a/scripts/main.py b/scripts/main.py
index a0a1898cc..f96afeb16 100644
--- a/scripts/main.py
+++ b/scripts/main.py
@@ -266,6 +266,10 @@ def parse_arguments():
         print_to_console("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
         cfg.set_smart_llm_model(cfg.fast_llm_model)
 
+    if args.debug:
+        print_to_console("Debug Mode: ", Fore.GREEN, "ENABLED")
+        cfg.set_debug_mode(True)
+
 
 # TODO: fill in llm values here
 
@@ -295,7 +299,7 @@ while True:
             user_input,
             full_message_history,
             memory,
-            cfg.fast_token_limit) # TODO: This hardcodes the model to use GPT3.5. Make this an argument
+            cfg.fast_token_limit, cfg.debug) # TODO: This hardcodes the model to use GPT3.5. Make this an argument
 
         # Print Assistant thoughts
         print_assistant_thoughts(assistant_reply)
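
A note on [PATCH 69/69]: the hunks above show `args.debug` being consumed in parse_arguments() and `cfg.debug` being passed to fix_json and chat_with_ai, but the argparse definition of the flag itself falls outside the diff context. The following is a minimal sketch of how the pieces are expected to fit together, assuming the flag is registered as a store-true `--debug` argument; the flag name and its registration are assumptions, not part of the patch.

# Sketch only, not part of the patch series. Assumes parse_arguments()
# registers a --debug flag; only `args.debug` is visible in the hunks above.
import argparse

from config import Config  # Singleton defined in scripts/config.py

parser = argparse.ArgumentParser()
parser.add_argument("--debug", action="store_true", help="Enable debug mode")
args = parser.parse_args()

cfg = Config()
if args.debug:
    # Mirrors the new branch added to parse_arguments() in this patch
    cfg.set_debug_mode(True)

# Downstream callers then read the shared config instead of a hardcoded literal:
#   fix_json(json_str, json_schema, cfg.debug)          # scripts/json_parser.py
#   chat_with_ai(..., cfg.fast_token_limit, cfg.debug)  # scripts/main.py

Under that assumption, invoking the entry point with the flag (e.g. `python scripts/main.py --debug`) would print "Debug Mode: ENABLED" and thread `cfg.debug` into both fix_json and chat_with_ai, replacing the `False` that was previously hardcoded at the fix_json call site.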