Automatic prompting (#2896)

* Add automatic AI prompting

* Tweak the default prompt.

* Print agent info upon creation.

* Improve system prompt

* Switch to fast_llm_model by default

* Add a format-output command to the user prompt.

This vastly improves formatting success rate.

* Add fallback to manual mode if LLM output cannot be parsed (or another error occurs).

* Add unit test to cover ai creation setup.

* Replace redundant prompt with manual mode instructions.

* Add missing docstrings and typing.

* Run black on changes.

* Run isort

* Update Python version and benchmark file in benchmark.yml

* Refactor main function and imports in cli.py

* Update import statement in ai_config.py

* Add set_temperature and set_memory_backend methods in config.py

* Remove unused import in prompt.py

* Add goal-oriented tasks workflow

* Added agent_utils to create agent

* Added pytest and vcrpy

* Added a write-file cassette

* Created a goal-oriented "write file" task backed by cassettes so tests don't spend OpenAI tokens (a sketch of such a cassette-backed test appears at the end of this page)

* Resolve conflicts

* Add the ability to set Azure, because the GitHub workflow needs it off

* Resolve conflicts in cli.py

* Run black because the linter fails

* Resolve conflict

* Set GitHub Actions to v3

Signed-off-by: Merwane Hamadi <merwanehamadi@gmail.com>

* fix conflicts

Signed-off-by: Merwane Hamadi <merwanehamadi@gmail.com>

* Plugins: debug line always printed in plugin load

* add decorator to tests

Signed-off-by: Merwane Hamadi <merwanehamadi@gmail.com>

* move decorator higher up

Signed-off-by: Merwane Hamadi <merwanehamadi@gmail.com>

* merge

---------

Signed-off-by: Merwane Hamadi <merwanehamadi@gmail.com>
Co-authored-by: Merwane Hamadi <merwanehamadi@gmail.com>
Co-authored-by: Merwane Hamadi <merwane.hamadi@redica.com>
Co-authored-by: Richard Beales <rich@richbeales.net>
Co-authored-by: Nicholas Tindle <nick@ntindle.com>
Co-authored-by: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com>
Toran Bruce Richards 2023-04-23 17:36:10 +12:00 committed by GitHub
parent 2b5852f7da
commit f462674e32
3 changed files with 209 additions and 1 deletion

autogpt/prompt.py

@@ -107,4 +107,22 @@ Continue (y/n): """
        config = prompt_user()
        config.save(CFG.ai_settings_file)

    # Agent Created, print message
    logger.typewriter_log(
        config.ai_name,
        Fore.LIGHTBLUE_EX,
        "has been created with the following details:",
        speak_text=True,
    )

    # Print the ai config details
    # Name
    logger.typewriter_log("Name:", Fore.GREEN, config.ai_name, speak_text=False)
    # Role
    logger.typewriter_log("Role:", Fore.GREEN, config.ai_role, speak_text=False)
    # Goals
    logger.typewriter_log("Goals:", Fore.GREEN, "", speak_text=False)
    for goal in config.ai_goals:
        logger.typewriter_log("-", Fore.GREEN, goal, speak_text=False)

    return config
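For quick reference (not part of the diff), the AIConfig printed above carries ai_name, ai_role and ai_goals; here is a minimal sketch of constructing and saving one directly, with the positional argument order taken from the diff and the "ai_settings.yaml" filename assumed:

# Sketch only: build an AIConfig by hand and save it.
from autogpt.config.ai_config import AIConfig

config = AIConfig(
    "Chef-GPT",  # ai_name
    "an AI designed to bake a cake.",  # ai_role
    ["Purchase ingredients", "Bake a cake"],  # ai_goals
)
config.save("ai_settings.yaml")  # assumed default settings filename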

autogpt/setup.py

@@ -1,18 +1,26 @@
"""Set up the AI and its goals"""
import re

from colorama import Fore, Style

from autogpt import utils
from autogpt.config import Config
from autogpt.config.ai_config import AIConfig
from autogpt.llm_utils import create_chat_completion
from autogpt.logs import logger

CFG = Config()


def prompt_user() -> AIConfig:
    """Prompt the user for input

    Returns:
        AIConfig: The AIConfig object containing the user's input
        AIConfig: The AIConfig object tailored to the user's input
    """
    ai_name = ""
    ai_config = None

    # Construct the prompt
    logger.typewriter_log(
        "Welcome to Auto-GPT! ",
@@ -21,6 +29,57 @@ def prompt_user() -> AIConfig:
        speak_text=True,
    )

    # Get user desire
    logger.typewriter_log(
        "Create an AI-Assistant:",
        Fore.GREEN,
        "input '--manual' to enter manual mode.",
        speak_text=True,
    )

    user_desire = utils.clean_input(
        f"{Fore.LIGHTBLUE_EX}I want Auto-GPT to{Style.RESET_ALL}: "
    )

    if user_desire == "":
        user_desire = "Write a wikipedia style article about the project: https://github.com/significant-gravitas/Auto-GPT"  # Default prompt

    # If user desire contains "--manual"
    if "--manual" in user_desire:
        logger.typewriter_log(
            "Manual Mode Selected",
            Fore.GREEN,
            speak_text=True,
        )
        return generate_aiconfig_manual()

    else:
        try:
            return generate_aiconfig_automatic(user_desire)
        except Exception as e:
            logger.typewriter_log(
                "Unable to automatically generate AI Config based on user desire.",
                Fore.RED,
                "Falling back to manual mode.",
                speak_text=True,
            )

            return generate_aiconfig_manual()


def generate_aiconfig_manual() -> AIConfig:
    """
    Interactively create an AI configuration by prompting the user to provide the name, role, and goals of the AI.

    This function guides the user through a series of prompts to collect the necessary information to create
    an AIConfig object. The user will be asked to provide a name and role for the AI, as well as up to five
    goals. If the user does not provide a value for any of the fields, default values will be used.

    Returns:
        AIConfig: An AIConfig object containing the user-defined or default AI name, role, and goals.
    """

    # Manual Setup Intro
    logger.typewriter_log(
        "Create an AI-Assistant:",
        Fore.GREEN,
@@ -75,3 +134,63 @@ def prompt_user() -> AIConfig:
    ]

    return AIConfig(ai_name, ai_role, ai_goals)


def generate_aiconfig_automatic(user_prompt) -> AIConfig:
    """Generates an AIConfig object from the given string.

    Returns:
        AIConfig: The AIConfig object tailored to the user's input
    """

    system_prompt = """
Your task is to devise up to 5 highly effective goals and an appropriate role-based name (_GPT) for an autonomous agent, ensuring that the goals are optimally aligned with the successful completion of its assigned task.

The user will provide the task, you will provide only the output in the exact format specified below with no explanation or conversation.

Example input:
Help me with marketing my business

Example output:
Name: CMOGPT
Description: a professional digital marketer AI that assists Solopreneurs in growing their businesses by providing world-class expertise in solving marketing problems for SaaS, content products, agencies, and more.
Goals:
- Engage in effective problem-solving, prioritization, planning, and supporting execution to address your marketing needs as your virtual Chief Marketing Officer.

- Provide specific, actionable, and concise advice to help you make informed decisions without the use of platitudes or overly wordy explanations.

- Identify and prioritize quick wins and cost-effective campaigns that maximize results with minimal time and budget investment.

- Proactively take the lead in guiding you and offering suggestions when faced with unclear information or uncertainty to ensure your marketing strategy remains on track.
"""

    # Call LLM with the string as user input
    messages = [
        {
            "role": "system",
            "content": system_prompt,
        },
        {
            "role": "user",
            "content": f"Task: '{user_prompt}'\nRespond only with the output in the exact format specified in the system prompt, with no explanation or conversation.\n",
        },
    ]
    output = create_chat_completion(messages, CFG.fast_llm_model)

    # Debug LLM Output
    logger.debug(f"AI Config Generator Raw Output: {output}")

    # Parse the output
    ai_name = re.search(r"Name(?:\s*):(?:\s*)(.*)", output, re.IGNORECASE).group(1)
    ai_role = (
        re.search(
            r"Description(?:\s*):(?:\s*)(.*?)(?:(?:\n)|Goals)",
            output,
            re.IGNORECASE | re.DOTALL,
        )
        .group(1)
        .strip()
    )
    ai_goals = re.findall(r"(?<=\n)-\s*(.*)", output)

    return AIConfig(ai_name, ai_role, ai_goals)
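To make the parsing step above concrete, here is a small standalone sketch (not part of the diff) that runs the same three regular expressions over a hand-written sample shaped like the system prompt's example output; sample_output is invented for illustration:

# Sketch only: exercise the Name/Description/Goals regexes used by generate_aiconfig_automatic.
import re

sample_output = (
    "Name: CMOGPT\n"
    "Description: a professional digital marketer AI that assists Solopreneurs.\n"
    "Goals:\n"
    "- Provide specific, actionable, and concise advice.\n"
    "- Identify and prioritize quick wins.\n"
)

ai_name = re.search(r"Name(?:\s*):(?:\s*)(.*)", sample_output, re.IGNORECASE).group(1)
ai_role = (
    re.search(
        r"Description(?:\s*):(?:\s*)(.*?)(?:(?:\n)|Goals)",
        sample_output,
        re.IGNORECASE | re.DOTALL,
    )
    .group(1)
    .strip()
)
ai_goals = re.findall(r"(?<=\n)-\s*(.*)", sample_output)

print(ai_name)   # CMOGPT
print(ai_role)   # a professional digital marketer AI that assists Solopreneurs.
print(ai_goals)  # ['Provide specific, actionable, and concise advice.', 'Identify and prioritize quick wins.']

The Description capture stops at the first newline or at the literal "Goals", and every line starting with "-" becomes a goal, which is why the user message also instructs the model to reply in exactly this format.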

tests/unit/test_setup.py (new file, 71 lines)

@@ -0,0 +1,71 @@
import unittest
from io import StringIO
from unittest.mock import patch

from autogpt.config.ai_config import AIConfig
from autogpt.setup import (
    generate_aiconfig_automatic,
    generate_aiconfig_manual,
    prompt_user,
)


class TestAutoGPT(unittest.TestCase):
    def test_generate_aiconfig_automatic_default(self):
        user_inputs = [""]
        with patch("builtins.input", side_effect=user_inputs):
            ai_config = prompt_user()

        self.assertIsInstance(ai_config, AIConfig)
        self.assertIsNotNone(ai_config.ai_name)
        self.assertIsNotNone(ai_config.ai_role)
        self.assertGreaterEqual(len(ai_config.ai_goals), 1)
        self.assertLessEqual(len(ai_config.ai_goals), 5)

    def test_generate_aiconfig_automatic_typical(self):
        user_prompt = "Help me create a rock opera about cybernetic giraffes"
        ai_config = generate_aiconfig_automatic(user_prompt)

        self.assertIsInstance(ai_config, AIConfig)
        self.assertIsNotNone(ai_config.ai_name)
        self.assertIsNotNone(ai_config.ai_role)
        self.assertGreaterEqual(len(ai_config.ai_goals), 1)
        self.assertLessEqual(len(ai_config.ai_goals), 5)

    def test_generate_aiconfig_automatic_fallback(self):
        user_inputs = [
            "T&GF£OIBECC()!*",
            "Chef-GPT",
            "an AI designed to browse bake a cake.",
            "Purchase ingredients",
            "Bake a cake",
            "",
        ]
        with patch("builtins.input", side_effect=user_inputs):
            ai_config = prompt_user()

        self.assertIsInstance(ai_config, AIConfig)
        self.assertEqual(ai_config.ai_name, "Chef-GPT")
        self.assertEqual(ai_config.ai_role, "an AI designed to browse bake a cake.")
        self.assertEqual(ai_config.ai_goals, ["Purchase ingredients", "Bake a cake"])

    def test_prompt_user_manual_mode(self):
        user_inputs = [
            "--manual",
            "Chef-GPT",
            "an AI designed to browse bake a cake.",
            "Purchase ingredients",
            "Bake a cake",
            "",
        ]
        with patch("builtins.input", side_effect=user_inputs):
            ai_config = prompt_user()

        self.assertIsInstance(ai_config, AIConfig)
        self.assertEqual(ai_config.ai_name, "Chef-GPT")
        self.assertEqual(ai_config.ai_role, "an AI designed to browse bake a cake.")
        self.assertEqual(ai_config.ai_goals, ["Purchase ingredients", "Bake a cake"])


if __name__ == "__main__":
    unittest.main()
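The commit messages above also mention goal-oriented tests recorded with vcrpy cassettes so that CI does not spend OpenAI tokens; those files are not among the three shown here. A minimal sketch of what such a cassette-backed "write file" test could look like, where the cassette path, the create_agent helper and the agent.run() interface are assumptions for illustration rather than the PR's actual code:

# Sketch only: replay recorded OpenAI traffic from a cassette instead of calling the API.
import vcr

my_vcr = vcr.VCR(
    cassette_library_dir="tests/integration/goal_oriented/cassettes",  # assumed path
    record_mode="none",  # replay only; never hit the network in CI
    filter_headers=["authorization"],  # keep the API key out of the cassette
)


@my_vcr.use_cassette("write_file.yaml")
def test_write_file(tmp_path):
    from tests.utils import create_agent  # hypothetical helper built on agent_utils

    agent = create_agent(goal=f"Write 'Hello World' to {tmp_path}/hello.txt")
    agent.run(max_steps=5)  # assumed interface for stepping the agent

    with open(tmp_path / "hello.txt") as f:
        assert f.read().strip() == "Hello World"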