Add config as attribute to Agent, rename old config to ai_config (#4638)
* Add config as attribute to Agent, rename old config to ai_config
* Code review: Pass ai_config

Co-authored-by: Nicholas Tindle <nick@ntindle.com>
Co-authored-by: merwanehamadi <merwanehamadi@gmail.com>
parent 15c6b0c1c3
commit 6b9e3b21d3
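Net effect at every construction site: `Agent` now takes the run-wide `Config` explicitly alongside the renamed `ai_config`, instead of receiving the `AIConfig` through the `config` parameter and pulling global settings from a singleton internally. A minimal before/after sketch of one call site, using only the argument names that appear in the diff below (surrounding variable definitions elided):

# Before: the AIConfig was passed as `config`; global settings came
# from Config() singletons constructed inside Agent's methods.
agent = Agent(
    ai_name=ai_name,
    memory=memory,
    next_action_count=next_action_count,
    command_registry=command_registry,
    config=ai_config,  # actually an AIConfig
    system_prompt=system_prompt,
    triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
    workspace_directory=workspace_directory,
)

# After: both configs are explicit; `config` is the global Config.
agent = Agent(
    ai_name=ai_name,
    memory=memory,
    next_action_count=next_action_count,
    command_registry=command_registry,
    ai_config=ai_config,  # per-AI settings (name, role, prompt generator)
    config=cfg,           # global runtime settings (models, mode flags)
    system_prompt=system_prompt,
    triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
    workspace_directory=workspace_directory,
)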
@@ -65,29 +65,31 @@ class Agent:
         memory: VectorMemory,
         next_action_count: int,
         command_registry: CommandRegistry,
-        config: AIConfig,
+        ai_config: AIConfig,
         system_prompt: str,
         triggering_prompt: str,
         workspace_directory: str,
+        config: Config,
     ):
-        cfg = Config()
         self.ai_name = ai_name
         self.memory = memory
         self.history = MessageHistory(self)
         self.next_action_count = next_action_count
         self.command_registry = command_registry
         self.config = config
+        self.ai_config = ai_config
         self.system_prompt = system_prompt
         self.triggering_prompt = triggering_prompt
-        self.workspace = Workspace(workspace_directory, cfg.restrict_to_workspace)
+        self.workspace = Workspace(workspace_directory, config.restrict_to_workspace)
         self.created_at = datetime.now().strftime("%Y%m%d_%H%M%S")
         self.cycle_count = 0
         self.log_cycle_handler = LogCycleHandler()
-        self.fast_token_limit = OPEN_AI_CHAT_MODELS.get(cfg.fast_llm_model).max_tokens
+        self.fast_token_limit = OPEN_AI_CHAT_MODELS.get(
+            config.fast_llm_model
+        ).max_tokens

     def start_interaction_loop(self):
         # Interaction Loop
-        cfg = Config()
         self.cycle_count = 0
         command_name = None
         arguments = None
@@ -112,34 +114,36 @@ class Agent:
             self.cycle_count += 1
             self.log_cycle_handler.log_count_within_cycle = 0
             self.log_cycle_handler.log_cycle(
-                self.config.ai_name,
+                self.ai_config.ai_name,
                 self.created_at,
                 self.cycle_count,
                 [m.raw() for m in self.history],
                 FULL_MESSAGE_HISTORY_FILE_NAME,
             )
             if (
-                cfg.continuous_mode
-                and cfg.continuous_limit > 0
-                and self.cycle_count > cfg.continuous_limit
+                self.config.continuous_mode
+                and self.config.continuous_limit > 0
+                and self.cycle_count > self.config.continuous_limit
             ):
                 logger.typewriter_log(
-                    "Continuous Limit Reached: ", Fore.YELLOW, f"{cfg.continuous_limit}"
+                    "Continuous Limit Reached: ",
+                    Fore.YELLOW,
+                    f"{self.config.continuous_limit}",
                 )
                 break
             # Send message to AI, get response
-            with Spinner("Thinking... ", plain_output=cfg.plain_output):
+            with Spinner("Thinking... ", plain_output=self.config.plain_output):
                 assistant_reply = chat_with_ai(
-                    cfg,
+                    self.config,
                     self,
                     self.system_prompt,
                     self.triggering_prompt,
                     self.fast_token_limit,
-                    cfg.fast_llm_model,
+                    self.config.fast_llm_model,
                 )

             assistant_reply_json = fix_json_using_multiple_techniques(assistant_reply)
-            for plugin in cfg.plugins:
+            for plugin in self.config.plugins:
                 if not plugin.can_handle_post_planning():
                     continue
                 assistant_reply_json = plugin.post_planning(assistant_reply_json)
@@ -150,10 +154,10 @@ class Agent:
             # Get command name and arguments
             try:
                 print_assistant_thoughts(
-                    self.ai_name, assistant_reply_json, cfg.speak_mode
+                    self.ai_name, assistant_reply_json, self.config.speak_mode
                 )
                 command_name, arguments = get_command(assistant_reply_json)
-                if cfg.speak_mode:
+                if self.config.speak_mode:
                     say_text(f"I want to execute {command_name}")

                 arguments = self._resolve_pathlike_command_args(arguments)
@@ -161,7 +165,7 @@ class Agent:
             except Exception as e:
                 logger.error("Error: \n", str(e))
                 self.log_cycle_handler.log_cycle(
-                    self.config.ai_name,
+                    self.ai_config.ai_name,
                     self.created_at,
                     self.cycle_count,
                     assistant_reply_json,
@@ -177,7 +181,7 @@ class Agent:
                 f"ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
             )

-            if not cfg.continuous_mode and self.next_action_count == 0:
+            if not self.config.continuous_mode and self.next_action_count == 0:
                 # ### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
                 # Get key press: Prompt the user to press enter to continue or escape
                 # to exit
@@ -188,13 +192,13 @@ class Agent:
                     f"{self.ai_name}..."
                 )
                 while True:
-                    if cfg.chat_messages_enabled:
+                    if self.config.chat_messages_enabled:
                         console_input = clean_input("Waiting for your response...")
                     else:
                         console_input = clean_input(
                             Fore.MAGENTA + "Input:" + Style.RESET_ALL
                         )
-                    if console_input.lower().strip() == cfg.authorise_key:
+                    if console_input.lower().strip() == self.config.authorise_key:
                         user_input = "GENERATE NEXT COMMAND JSON"
                         break
                     elif console_input.lower().strip() == "s":
@@ -205,7 +209,7 @@ class Agent:
                         )
                         thoughts = assistant_reply_json.get("thoughts", {})
                         self_feedback_resp = self.get_self_feedback(
-                            thoughts, cfg.fast_llm_model
+                            thoughts, self.config.fast_llm_model
                         )
                         logger.typewriter_log(
                             f"SELF FEEDBACK: {self_feedback_resp}",
@@ -218,7 +222,9 @@ class Agent:
                     elif console_input.lower().strip() == "":
                         logger.warn("Invalid input format.")
                         continue
-                    elif console_input.lower().startswith(f"{cfg.authorise_key} -"):
+                    elif console_input.lower().startswith(
+                        f"{self.config.authorise_key} -"
+                    ):
                         try:
                             self.next_action_count = abs(
                                 int(console_input.split(" ")[1])
@@ -231,14 +237,14 @@ class Agent:
                             )
                             continue
                         break
-                    elif console_input.lower() == cfg.exit_key:
+                    elif console_input.lower() == self.config.exit_key:
                         user_input = "EXIT"
                         break
                     else:
                         user_input = console_input
                         command_name = "human_feedback"
                         self.log_cycle_handler.log_cycle(
-                            self.config.ai_name,
+                            self.ai_config.ai_name,
                             self.created_at,
                             self.cycle_count,
                             user_input,
@@ -271,7 +277,7 @@ class Agent:
             elif command_name == "self_feedback":
                 result = f"Self feedback: {user_input}"
             else:
-                for plugin in cfg.plugins:
+                for plugin in self.config.plugins:
                     if not plugin.can_handle_pre_command():
                         continue
                     command_name, arguments = plugin.pre_command(
@@ -281,22 +287,22 @@ class Agent:
                     self.command_registry,
                     command_name,
                     arguments,
-                    self.config.prompt_generator,
-                    config=cfg,
+                    self.ai_config.prompt_generator,
+                    config=self.config,
                 )
                 result = f"Command {command_name} returned: " f"{command_result}"

                 result_tlength = count_string_tokens(
-                    str(command_result), cfg.fast_llm_model
+                    str(command_result), self.config.fast_llm_model
                 )
                 memory_tlength = count_string_tokens(
-                    str(self.history.summary_message()), cfg.fast_llm_model
+                    str(self.history.summary_message()), self.config.fast_llm_model
                 )
                 if result_tlength + memory_tlength + 600 > self.fast_token_limit:
                     result = f"Failure: command {command_name} returned too much output. \
                        Do not execute this command again with the same arguments."

-                for plugin in cfg.plugins:
+                for plugin in self.config.plugins:
                     if not plugin.can_handle_post_command():
                         continue
                     result = plugin.post_command(command_name, result)
@@ -337,7 +343,7 @@ class Agent:
         Returns:
             str: A feedback response generated using the provided thoughts dictionary.
         """
-        ai_role = self.config.ai_role
+        ai_role = self.ai_config.ai_role

        feedback_prompt = f"Below is a message from me, an AI Agent, assuming the role of {ai_role}. whilst keeping knowledge of my slight limitations as an AI Agent Please evaluate my thought process, reasoning, and plan, and provide a concise paragraph outlining potential improvements. Consider adding or removing ideas that do not align with my role and explaining why, prioritizing thoughts based on their significance, or simply refining my overall thought process."
        reasoning = thoughts.get("reasoning", "")
@@ -349,7 +355,7 @@ class Agent:
         prompt.add("user", feedback_prompt + feedback_thoughts)

         self.log_cycle_handler.log_cycle(
-            self.config.ai_name,
+            self.ai_config.ai_name,
             self.created_at,
             self.cycle_count,
             prompt.raw(),
@@ -359,7 +365,7 @@ class Agent:
         feedback = create_chat_completion(prompt)

         self.log_cycle_handler.log_cycle(
-            self.config.ai_name,
+            self.ai_config.ai_name,
             self.created_at,
             self.cycle_count,
             feedback,

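The recurring pattern in the Agent hunks above is dependency injection: each method-local `cfg = Config()` singleton lookup is dropped in favor of the `Config` passed to the constructor, so `self.config` becomes the single source of runtime settings. Condensed from the hunks above:

# Before: each method pulled a fresh singleton
cfg = Config()
with Spinner("Thinking... ", plain_output=cfg.plain_output):
    ...

# After: the injected instance is used instead
with Spinner("Thinking... ", plain_output=self.config.plain_output):
    ...
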
@@ -150,7 +150,7 @@ def chat_with_ai(
         if not plugin.can_handle_on_planning():
             continue
         plugin_response = plugin.on_planning(
-            agent.config.prompt_generator, message_sequence.raw()
+            agent.ai_config.prompt_generator, message_sequence.raw()
         )
         if not plugin_response or plugin_response == "":
             continue
@@ -181,7 +181,7 @@ def chat_with_ai(
         logger.debug("")
         logger.debug("----------- END OF CONTEXT ----------------")
         agent.log_cycle_handler.log_cycle(
-            agent.config.ai_name,
+            agent.ai_config.ai_name,
             agent.created_at,
             agent.cycle_count,
             message_sequence.raw(),

@@ -189,9 +189,10 @@ def run_auto_gpt(
         memory=memory,
         next_action_count=next_action_count,
         command_registry=command_registry,
-        config=ai_config,
         system_prompt=system_prompt,
         triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
         workspace_directory=workspace_directory,
+        ai_config=ai_config,
+        config=cfg,
     )
     agent.start_interaction_loop()

@@ -184,7 +184,7 @@ Latest Development:

         prompt = ChatSequence.for_model(cfg.fast_llm_model, [Message("user", prompt)])
         self.agent.log_cycle_handler.log_cycle(
-            self.agent.config.ai_name,
+            self.agent.ai_config.ai_name,
             self.agent.created_at,
             self.agent.cycle_count,
             prompt.raw(),
@@ -194,7 +194,7 @@ Latest Development:
         self.summary = create_chat_completion(prompt)

         self.agent.log_cycle_handler.log_cycle(
-            self.agent.config.ai_name,
+            self.agent.ai_config.ai_name,
             self.agent.created_at,
             self.agent.cycle_count,
             self.summary,

@@ -59,7 +59,8 @@ def browser_agent(agent_test_config, memory_none: NoMemory, workspace: Workspace
         ai_name="",
         memory=memory_none,
         command_registry=command_registry,
-        config=ai_config,
+        ai_config=ai_config,
+        config=agent_test_config,
         next_action_count=0,
         system_prompt=system_prompt,
         triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
@@ -95,7 +96,8 @@ def file_system_agents(
         ai_name="Information Retrieval Agent",
         memory=memory_json_file,
         command_registry=command_registry,
-        config=ai_config,
+        ai_config=ai_config,
+        config=agent_test_config,
         next_action_count=0,
         system_prompt=system_prompt,
         triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
@@ -125,7 +127,8 @@ def memory_management_agent(agent_test_config, memory_json_file, workspace: Work
         ai_name="Follow-Instructions-GPT",
         memory=memory_json_file,
         command_registry=command_registry,
-        config=ai_config,
+        ai_config=ai_config,
+        config=agent_test_config,
         next_action_count=0,
         system_prompt=system_prompt,
         triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
@@ -161,7 +164,8 @@ def information_retrieval_agents(
         ai_name="Information Retrieval Agent",
         memory=memory_json_file,
         command_registry=command_registry,
-        config=ai_config,
+        ai_config=ai_config,
+        config=agent_test_config,
         next_action_count=0,
         system_prompt=system_prompt,
         triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
@@ -172,7 +176,9 @@ def information_retrieval_agents(


 @pytest.fixture
-def kubernetes_agent(memory_json_file, workspace: Workspace):
+def kubernetes_agent(
+    agent_test_config: Config, memory_json_file: NoMemory, workspace: Workspace
+) -> Agent:
     command_registry = CommandRegistry()
     command_registry.import_commands("autogpt.commands.file_operations")
     command_registry.import_commands("autogpt.app")
@@ -193,7 +199,8 @@ def kubernetes_agent(memory_json_file, workspace: Workspace):
         ai_name="Kubernetes-Demo",
         memory=memory_json_file,
         command_registry=command_registry,
-        config=ai_config,
+        ai_config=ai_config,
+        config=agent_test_config,
         next_action_count=0,
         system_prompt=system_prompt,
         triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
@@ -226,7 +233,8 @@ def get_nobel_prize_agent(agent_test_config, memory_json_file, workspace: Worksp
         ai_name="Get-PhysicsNobelPrize",
         memory=memory_json_file,
         command_registry=command_registry,
-        config=ai_config,
+        ai_config=ai_config,
+        config=agent_test_config,
         next_action_count=0,
         system_prompt=system_prompt,
         triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
@@ -270,7 +278,8 @@ def debug_code_agents(agent_test_config, memory_json_file, workspace: Workspace)
         ai_name="Debug Code Agent",
         memory=memory_json_file,
         command_registry=command_registry,
-        config=ai_config,
+        ai_config=ai_config,
+        config=agent_test_config,
         next_action_count=0,
         system_prompt=system_prompt,
         triggering_prompt=DEFAULT_TRIGGERING_PROMPT,

@@ -4,28 +4,30 @@ import pytest

 from autogpt.agent import Agent
 from autogpt.config import AIConfig
+from autogpt.config.config import Config


 @pytest.fixture
-def agent():
+def agent(config: Config):
     ai_name = "Test AI"
     memory = MagicMock()
     next_action_count = 0
     command_registry = MagicMock()
-    config = AIConfig()
+    ai_config = AIConfig(ai_name=ai_name)
     system_prompt = "System prompt"
     triggering_prompt = "Triggering prompt"
     workspace_directory = "workspace_directory"

     agent = Agent(
-        ai_name,
-        memory,
-        next_action_count,
-        command_registry,
-        config,
-        system_prompt,
-        triggering_prompt,
-        workspace_directory,
+        ai_name=ai_name,
+        memory=memory,
+        next_action_count=next_action_count,
+        command_registry=command_registry,
+        ai_config=ai_config,
+        config=config,
+        system_prompt=system_prompt,
+        triggering_prompt=triggering_prompt,
+        workspace_directory=workspace_directory,
     )
     return agent

@@ -36,7 +38,7 @@ def test_agent_initialization(agent: Agent):
     assert agent.history.messages == []
     assert agent.next_action_count == 0
     assert agent.command_registry == agent.command_registry
-    assert agent.config == agent.config
+    assert agent.ai_config == agent.ai_config
     assert agent.system_prompt == "System prompt"
     assert agent.triggering_prompt == "Triggering prompt"

@@ -1,12 +1,15 @@
 from datetime import datetime

+from pytest_mock import MockerFixture
+
 from autogpt.agent.agent import Agent
+from autogpt.config import AIConfig
+from autogpt.config.config import Config
 from autogpt.llm.chat import create_chat_completion
 from autogpt.log_cycle.log_cycle import LogCycleHandler


-def test_get_self_feedback(mocker):
+def test_get_self_feedback(config: Config, mocker: MockerFixture):
     # Define a sample thoughts dictionary
     thoughts = {
         "reasoning": "Sample reasoning.",
@@ -32,7 +35,8 @@ def test_get_self_feedback(mocker):
     agent_mock = mocker.MagicMock(spec=Agent)

     # Mock the config attribute of the Agent instance
-    agent_mock.config = AIConfig()
+    agent_mock.config = config
+    agent_mock.ai_config = AIConfig()

     # Mock the log_cycle_handler attribute of the Agent instance
     agent_mock.log_cycle_handler = LogCycleHandler()
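Taken together, the test changes pin down the new split: `agent.config` is the injected global `Config` fixture, while `agent.ai_config` is a plain `AIConfig`. A rough sketch of the access pattern the rename produces inside `Agent` methods (the method is hypothetical, for illustration only; the attribute names are from the diff):

def _example_cycle_step(self) -> None:
    # Hypothetical helper, not part of the diff: shows which object
    # each kind of lookup now goes through.
    name = self.ai_config.ai_name          # per-AI identity (AIConfig)
    model = self.config.fast_llm_model     # runtime setting (Config)
    if self.config.continuous_mode:        # mode flags live on Config too
        ...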