parent 7cd76b8d8e
commit adbb47fb65
@@ -12,7 +12,7 @@ from autogpt.configurator import create_config
 from autogpt.logs import logger
 from autogpt.memory import get_memory
 from autogpt.plugins import scan_plugins
-from autogpt.prompts.prompt import construct_main_ai_config
+from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT, construct_main_ai_config
 from autogpt.utils import get_current_git_branch, get_latest_bulletin
 from autogpt.workspace import Workspace
 from scripts.install_plugin_deps import install_plugin_dependencies
@@ -124,11 +124,7 @@ def run_auto_gpt(
     # Initialize variables
     full_message_history = []
     next_action_count = 0
-    # Make a constant:
-    triggering_prompt = (
-        "Determine which next command to use, and respond using the"
-        " format specified above:"
-    )
+
     # Initialize memory and make sure it is empty.
     # this is particularly important for indexing and referencing pinecone memory
     memory = get_memory(cfg, init=True)
@@ -148,7 +144,7 @@ def run_auto_gpt(
         command_registry=command_registry,
         config=ai_config,
         system_prompt=system_prompt,
-        triggering_prompt=triggering_prompt,
+        triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
         workspace_directory=workspace_directory,
     )
     agent.start_interaction_loop()
@@ -10,6 +10,10 @@ from autogpt.utils import clean_input
 
 CFG = Config()
 
+DEFAULT_TRIGGERING_PROMPT = (
+    "Determine which next command to use, and respond using the format specified above:"
+)
+
 
 def build_default_prompt_generator() -> PromptGenerator:
     """
@@ -71,9 +75,6 @@ def build_default_prompt_generator() -> PromptGenerator:
         "Every command has a cost, so be smart and efficient. Aim to complete tasks in"
         " the least number of steps."
     )
-    prompt_generator.add_performance_evaluation(
-        "If you cannot think of a valid command to perform start or message an agent to determine the next command."
-    )
     prompt_generator.add_performance_evaluation("Write all code to a file.")
     return prompt_generator
 
@@ -0,0 +1,87 @@
+import os
+
+from autogpt.agent import Agent
+from autogpt.app import CFG
+from autogpt.commands.command import CommandRegistry
+from autogpt.config import AIConfig
+from autogpt.memory import get_memory
+from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT
+
+
+def create_browser_agent(workspace):
+    command_registry = CommandRegistry()
+    command_registry.import_commands("autogpt.commands.file_operations")
+    command_registry.import_commands("autogpt.commands.web_selenium")
+    command_registry.import_commands("autogpt.app")
+
+    ai_config = AIConfig(
+        ai_name="browse_website-GPT",
+        ai_role="an AI designed to use the browse_website command to visit http://books.toscrape.com/catalogue/meditations_33/index.html, answer the question 'What is the price of the book?' and write the price to a file named \"browse_website.txt\", and use the task_complete command to complete the task.",
+        ai_goals=[
+            "Use the browse_website command to visit http://books.toscrape.com/catalogue/meditations_33/index.html and answer the question 'What is the price of the book?'",
+            'Write the price of the book to a file named "browse_website.txt".',
+            "Use the task_complete command to complete the task.",
+            "Do not use any other commands.",
+        ],
+    )
+    ai_config.command_registry = command_registry
+    CFG.set_continuous_mode(True)
+    CFG.set_memory_backend("no_memory")
+    CFG.set_temperature(0)
+
+    memory = get_memory(CFG, init=True)
+    system_prompt = ai_config.construct_full_prompt()
+
+    agent = Agent(
+        ai_name="",
+        memory=memory,
+        full_message_history=[],
+        command_registry=command_registry,
+        config=ai_config,
+        next_action_count=0,
+        system_prompt=system_prompt,
+        triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
+        workspace_directory=workspace.root,
+    )
+
+    return agent
+
+
+def create_writer_agent(workspace):
+    command_registry = CommandRegistry()
+    command_registry.import_commands("autogpt.commands.file_operations")
+    command_registry.import_commands("autogpt.app")
+
+    ai_config = AIConfig(
+        ai_name="write_to_file-GPT",
+        ai_role="an AI designed to use the write_to_file command to write 'Hello World' into a file named \"hello_world.txt\" and then use the task_complete command to complete the task.",
+        ai_goals=[
+            "Use the write_to_file command to write 'Hello World' into a file named \"hello_world.txt\".",
+            "Use the task_complete command to complete the task.",
+            "Do not use any other commands.",
+        ],
+    )
+    ai_config.command_registry = command_registry
+    CFG.set_continuous_mode(True)
+    CFG.set_memory_backend("no_memory")
+    CFG.set_temperature(0)
+    memory = get_memory(CFG, init=True)
+    triggering_prompt = (
+        "Determine which next command to use, and respond using the"
+        " format specified above:"
+    )
+    system_prompt = ai_config.construct_full_prompt()
+
+    agent = Agent(
+        ai_name="",
+        memory=memory,
+        full_message_history=[],
+        command_registry=command_registry,
+        config=ai_config,
+        next_action_count=0,
+        system_prompt=system_prompt,
+        triggering_prompt=triggering_prompt,
+        workspace_directory=workspace.root,
+    )
+
+    return agent
@@ -0,0 +1,10 @@
+import concurrent
+
+
+def run_interaction_loop(agent, timeout):
+    with concurrent.futures.ThreadPoolExecutor() as executor:
+        future = executor.submit(agent.start_interaction_loop)
+        try:
+            result = future.result(timeout=timeout)
+        except concurrent.futures.TimeoutError:
+            assert False, f"The process took longer than {timeout} seconds to complete."
File diff suppressed because it is too large
@@ -0,0 +1,27 @@
+import os
+
+import pytest
+
+from autogpt.commands.file_operations import read_file
+from autogpt.config import Config
+from tests.integration.agent_factory import create_browser_agent
+from tests.integration.agent_utils import run_interaction_loop
+from tests.utils import get_workspace_file_path, requires_api_key
+
+CFG = Config()
+
+
+@requires_api_key("OPENAI_API_KEY")
+@pytest.mark.vcr
+def test_browse_website(workspace) -> None:
+    CFG.workspace_path = workspace.root
+    CFG.file_logger_path = os.path.join(workspace.root, "file_logger.txt")
+
+    file_name = get_workspace_file_path(workspace, "browse_website.txt")
+    agent = create_browser_agent(workspace)
+    try:
+        run_interaction_loop(agent, 40)
+    # catch system exit exceptions
+    except SystemExit: # the agent returns an exception when it shuts down
+        content = read_file(file_name)
+        assert "£25.89" in content, f"Expected £25.89, got {content}"
@@ -1,14 +1,11 @@
-import concurrent
 import os
-import unittest
 
 import pytest
 
-from autogpt.agent import Agent
-from autogpt.commands.command import CommandRegistry
-from autogpt.commands.file_operations import delete_file, read_file
-from autogpt.config import AIConfig, Config
-from autogpt.memory import get_memory
+from autogpt.commands.file_operations import read_file
+from autogpt.config import Config
+from tests.integration.agent_factory import create_writer_agent
+from tests.integration.agent_utils import run_interaction_loop
 from tests.utils import requires_api_key
 
 CFG = Config()
@@ -23,59 +20,8 @@ def test_write_file(workspace) -> None:
     file_name = str(workspace.get_path("hello_world.txt"))
     agent = create_writer_agent(workspace)
     try:
-        with concurrent.futures.ThreadPoolExecutor() as executor:
-            future = executor.submit(agent.start_interaction_loop)
-            try:
-                result = future.result(timeout=45)
-            except concurrent.futures.TimeoutError:
-                assert False, "The process took longer than 45 seconds to complete."
+        run_interaction_loop(agent, 40)
     # catch system exit exceptions
     except SystemExit: # the agent returns an exception when it shuts down
         content = read_file(file_name)
         assert content == "Hello World", f"Expected 'Hello World', got {content}"
-
-
-def create_writer_agent(workspace):
-    command_registry = CommandRegistry()
-    command_registry.import_commands("autogpt.commands.file_operations")
-    command_registry.import_commands("autogpt.app")
-
-    ai_config = AIConfig(
-        ai_name="write_to_file-GPT",
-        ai_role="an AI designed to use the write_to_file command to write 'Hello World' into a file named \"hello_world.txt\" and then use the task_complete command to complete the task.",
-        ai_goals=[
-            "Use the write_to_file command to write 'Hello World' into a file named \"hello_world.txt\".",
-            "Use the task_complete command to complete the task.",
-            "Do not use any other commands.",
-        ],
-    )
-    ai_config.command_registry = command_registry
-    CFG.set_continuous_mode(True)
-    CFG.set_memory_backend("no_memory")
-    CFG.set_temperature(0)
-    memory = get_memory(CFG, init=True)
-    triggering_prompt = (
-        "Determine which next command to use, and respond using the"
-        " format specified above:"
-    )
-    system_prompt = ai_config.construct_full_prompt()
-
-    agent = Agent(
-        ai_name="",
-        memory=memory,
-        full_message_history=[],
-        command_registry=command_registry,
-        config=ai_config,
-        next_action_count=0,
-        system_prompt=system_prompt,
-        triggering_prompt=triggering_prompt,
-        workspace_directory=workspace.root,
-    )
-
-    os.environ["TIKTOKEN_CACHE_DIR"] = ""
-
-    return agent
-
-
-if __name__ == "__main__":
-    unittest.main()
@@ -40,3 +40,7 @@ def skip_in_ci(test_function):
         os.environ.get("CI") == "true",
         reason="This test doesn't work on GitHub Actions.",
     )(test_function)
+
+
+def get_workspace_file_path(workspace, file_name):
+    return str(workspace.get_path(file_name))