diff --git a/autogpts/autogpt/autogpt/core/ability/__init__.py b/autogpts/autogpt/autogpt/core/ability/__init__.py
index 3cf310dec..a186d9ace 100644
--- a/autogpts/autogpt/autogpt/core/ability/__init__.py
+++ b/autogpts/autogpt/autogpt/core/ability/__init__.py
@@ -1,4 +1,8 @@
 """The command system provides a way to extend the functionality of the AI agent."""
-from autogpt.core.ability.base import Ability, AbilityRegistry
+from autogpt.core.ability.base import Ability, AbilityConfiguration, AbilityRegistry
 from autogpt.core.ability.schema import AbilityResult
-from autogpt.core.ability.simple import AbilityRegistrySettings, SimpleAbilityRegistry
+from autogpt.core.ability.simple import (
+    AbilityRegistryConfiguration,
+    AbilityRegistrySettings,
+    SimpleAbilityRegistry,
+)
diff --git a/autogpts/autogpt/autogpt/core/ability/base.py b/autogpts/autogpt/autogpt/core/ability/base.py
index 0062219d7..2686c101c 100644
--- a/autogpts/autogpt/autogpt/core/ability/base.py
+++ b/autogpts/autogpt/autogpt/core/ability/base.py
@@ -5,11 +5,13 @@ from typing import Any, ClassVar
 
 import inflection
 from pydantic import Field
-
-from autogpt.core.ability.schema import AbilityResult
 from autogpt.core.configuration import SystemConfiguration
 from autogpt.core.planning.simple import LanguageModelConfiguration
 from autogpt.core.plugin.base import PluginLocation
+from autogpt.core.resource.model_providers import CompletionModelFunction
+from autogpt.core.utils.json_schema import JSONSchema
+
+from .schema import AbilityResult
 
 
 class AbilityConfiguration(SystemConfiguration):
@@ -32,40 +34,34 @@ class Ability(abc.ABC):
         """The name of the ability."""
         return inflection.underscore(cls.__name__)
 
+    @property
     @classmethod
     @abc.abstractmethod
     def description(cls) -> str:
         """A detailed description of what the ability does."""
         ...
 
+    @property
     @classmethod
     @abc.abstractmethod
-    def arguments(cls) -> dict:
-        """A dict of arguments in standard json schema format."""
+    def parameters(cls) -> dict[str, JSONSchema]:
         ...
 
-    @classmethod
-    def required_arguments(cls) -> list[str]:
-        """A list of required arguments."""
-        return []
-
     @abc.abstractmethod
     async def __call__(self, *args: Any, **kwargs: Any) -> AbilityResult:
         ...
 
     def __str__(self) -> str:
-        return pformat(self.dump())
+        return pformat(self.spec)
 
-    def dump(self) -> dict:
-        return {
-            "name": self.name(),
-            "description": self.description(),
-            "parameters": {
-                "type": "object",
-                "properties": self.arguments(),
-                "required": self.required_arguments(),
-            },
-        }
+    @property
+    @classmethod
+    def spec(cls) -> CompletionModelFunction:
+        return CompletionModelFunction(
+            name=cls.name(),
+            description=cls.description,
+            parameters=cls.parameters,
+        )
 
 
 class AbilityRegistry(abc.ABC):
@@ -80,7 +76,7 @@ class AbilityRegistry(abc.ABC):
         ...
 
     @abc.abstractmethod
-    def dump_abilities(self) -> list[dict]:
+    def dump_abilities(self) -> list[CompletionModelFunction]:
         ...
 
     @abc.abstractmethod
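For orientation before the built-in abilities below: under the new `Ability` interface, `description` and `parameters` become class attributes instead of classmethods, and `spec` assembles them into a `CompletionModelFunction`. A minimal sketch of an ability against the new interface (a hypothetical `Echo` ability, not part of this diff, assuming only the constructs shown above):

```python
from typing import ClassVar

from autogpt.core.ability.base import Ability
from autogpt.core.ability.schema import AbilityResult
from autogpt.core.utils.json_schema import JSONSchema


class Echo(Ability):
    """Hypothetical ability illustrating the new class-attribute interface."""

    description: ClassVar[str] = "Return the given message unchanged."

    parameters: ClassVar[dict[str, JSONSchema]] = {
        "message": JSONSchema(
            type=JSONSchema.Type.STRING,
            description="The message to echo back.",
            required=True,
        ),
    }

    async def __call__(self, message: str) -> AbilityResult:
        return AbilityResult(
            ability_name=self.name(),
            ability_args={"message": message},
            success=True,
            message=message,
        )


# Echo.spec is then a CompletionModelFunction that a registry can hand to
# the model provider via dump_abilities().
```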
diff --git a/autogpts/autogpt/autogpt/core/ability/builtins/create_new_ability.py b/autogpts/autogpt/autogpt/core/ability/builtins/create_new_ability.py
index 8c53efb3e..e8526d3c5 100644
--- a/autogpts/autogpt/autogpt/core/ability/builtins/create_new_ability.py
+++ b/autogpts/autogpt/autogpt/core/ability/builtins/create_new_ability.py
@@ -1,8 +1,10 @@
 import logging
+from typing import ClassVar
 
 from autogpt.core.ability.base import Ability, AbilityConfiguration
 from autogpt.core.ability.schema import AbilityResult
 from autogpt.core.plugin.simple import PluginLocation, PluginStorageFormat
+from autogpt.core.utils.json_schema import JSONSchema
 
 
 class CreateNewAbility(Ability):
@@ -21,74 +23,62 @@ class CreateNewAbility(Ability):
         self._logger = logger
         self._configuration = configuration
 
-    @classmethod
-    def description(cls) -> str:
-        return "Create a new ability by writing python code."
+    description: ClassVar[str] = "Create a new ability by writing python code."
 
-    @classmethod
-    def arguments(cls) -> dict:
-        return {
-            "ability_name": {
-                "type": "string",
-                "description": "A meaningful and concise name for the new ability.",
-            },
-            "description": {
-                "type": "string",
-                "description": "A detailed description of the ability and its uses, including any limitations.",
-            },
-            "arguments": {
-                "type": "array",
-                "items": {
-                    "type": "object",
-                    "properties": {
-                        "name": {
-                            "type": "string",
-                            "description": "The name of the argument.",
-                        },
-                        "type": {
-                            "type": "string",
-                            "description": "The type of the argument. Must be a standard json schema type.",
-                        },
-                        "description": {
-                            "type": "string",
-                            "description": "A detailed description of the argument and its uses.",
-                        },
-                    },
-                    },
+    parameters: ClassVar[dict[str, JSONSchema]] = {
+        "ability_name": JSONSchema(
+            description="A meaningful and concise name for the new ability.",
+            type=JSONSchema.Type.STRING,
+            required=True,
+        ),
+        "description": JSONSchema(
+            description="A detailed description of the ability and its uses, including any limitations.",
+            type=JSONSchema.Type.STRING,
+            required=True,
+        ),
+        "arguments": JSONSchema(
+            description="A list of arguments that the ability will accept.",
+            type=JSONSchema.Type.ARRAY,
+            items=JSONSchema(
+                type=JSONSchema.Type.OBJECT,
+                properties={
+                    "name": JSONSchema(
+                        description="The name of the argument.",
+                        type=JSONSchema.Type.STRING,
+                    ),
+                    "type": JSONSchema(
+                        description="The type of the argument. Must be a standard json schema type.",
+                        type=JSONSchema.Type.STRING,
+                    ),
+                    "description": JSONSchema(
+                        description="A detailed description of the argument and its uses.",
+                        type=JSONSchema.Type.STRING,
+                    ),
                 },
-                "description": "A list of arguments that the ability will accept.",
-            },
-            "required_arguments": {
-                "type": "array",
-                "items": {
-                    "type": "string",
-                    "description": "The names of the arguments that are required.",
-                },
-                "description": "A list of the names of the arguments that are required.",
-            },
-            "package_requirements": {
-                "type": "array",
-                "items": {
-                    "type": "string",
-                    "description": "The of the Python package that is required to execute the ability.",
-                },
-                "description": "A list of the names of the Python packages that are required to execute the ability.",
-            },
-            "code": {
-                "type": "string",
-                "description": "The Python code that will be executed when the ability is called.",
-            },
-        }
-
-    @classmethod
-    def required_arguments(cls) -> list[str]:
-        return [
-            "ability_name",
-            "description",
-            "arguments",
-            "required_arguments",
-            "package_requirements",
-            "code",
-        ]
+            ),
+        ),
+        "required_arguments": JSONSchema(
+            description="A list of the names of the arguments that are required.",
+            type=JSONSchema.Type.ARRAY,
+            items=JSONSchema(
+                description="The names of the arguments that are required.",
+                type=JSONSchema.Type.STRING,
+            ),
+        ),
+        "package_requirements": JSONSchema(
+            description="A list of the names of the Python packages that are required to execute the ability.",
+            type=JSONSchema.Type.ARRAY,
+            items=JSONSchema(
+                description="The name of the Python package that is required to execute the ability.",
+                type=JSONSchema.Type.STRING,
+            ),
+        ),
+        "code": JSONSchema(
+            description="The Python code that will be executed when the ability is called.",
+            type=JSONSchema.Type.STRING,
+            required=True,
+        ),
+    }
 
     async def __call__(
         self,
diff --git a/autogpts/autogpt/autogpt/core/ability/builtins/file_operations.py b/autogpts/autogpt/autogpt/core/ability/builtins/file_operations.py
index 43cd0d0cd..08dc8c7a9 100644
--- a/autogpts/autogpt/autogpt/core/ability/builtins/file_operations.py
+++ b/autogpts/autogpt/autogpt/core/ability/builtins/file_operations.py
@@ -1,13 +1,20 @@
 import logging
 import os
+from typing import ClassVar
 
 from autogpt.core.ability.base import Ability, AbilityConfiguration
 from autogpt.core.ability.schema import AbilityResult, ContentType, Knowledge
+from autogpt.core.plugin.simple import PluginLocation, PluginStorageFormat
+from autogpt.core.utils.json_schema import JSONSchema
 from autogpt.core.workspace import Workspace
 
 
 class ReadFile(Ability):
     default_configuration = AbilityConfiguration(
+        location=PluginLocation(
+            storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
+            storage_route="autogpt.core.ability.builtins.ReadFile",
+        ),
         packages_required=["unstructured"],
         workspace_required=True,
     )
@@ -20,18 +27,14 @@ class ReadFile(Ability):
         self._logger = logger
         self._workspace = workspace
 
-    @property
-    def description(self) -> str:
-        return "Read and parse all text from a file."
+    description: ClassVar[str] = "Read and parse all text from a file."
 
-    @property
-    def arguments(self) -> dict:
-        return {
-            "filename": {
-                "type": "string",
-                "description": "The name of the file to read.",
-            },
-        }
+    parameters: ClassVar[dict[str, JSONSchema]] = {
+        "filename": JSONSchema(
+            type=JSONSchema.Type.STRING,
+            description="The name of the file to read.",
+        ),
+    }
 
     def _check_preconditions(self, filename: str) -> AbilityResult | None:
         message = ""
@@ -92,6 +95,10 @@ class ReadFile(Ability):
 
 
 class WriteFile(Ability):
     default_configuration = AbilityConfiguration(
+        location=PluginLocation(
+            storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
+            storage_route="autogpt.core.ability.builtins.WriteFile",
+        ),
         packages_required=["unstructured"],
         workspace_required=True,
     )
@@ -104,22 +111,18 @@ class WriteFile(Ability):
         self._logger = logger
         self._workspace = workspace
 
-    @property
-    def description(self) -> str:
-        return "Write text to a file."
+    description: ClassVar[str] = "Write text to a file."
 
-    @property
-    def arguments(self) -> dict:
-        return {
-            "filename": {
-                "type": "string",
-                "description": "The name of the file to write.",
-            },
-            "contents": {
-                "type": "string",
-                "description": "The contents of the file to write.",
-            },
-        }
+    parameters: ClassVar[dict[str, JSONSchema]] = {
+        "filename": JSONSchema(
+            type=JSONSchema.Type.STRING,
+            description="The name of the file to write.",
+        ),
+        "contents": JSONSchema(
+            type=JSONSchema.Type.STRING,
+            description="The contents of the file to write.",
+        ),
+    }
 
     def _check_preconditions(
         self, filename: str, contents: str
diff --git a/autogpts/autogpt/autogpt/core/ability/builtins/query_language_model.py b/autogpts/autogpt/autogpt/core/ability/builtins/query_language_model.py
index 95a5e0948..67d51087f 100644
--- a/autogpts/autogpt/autogpt/core/ability/builtins/query_language_model.py
+++ b/autogpts/autogpt/autogpt/core/ability/builtins/query_language_model.py
@@ -1,16 +1,17 @@
 import logging
+from typing import ClassVar
 
 from autogpt.core.ability.base import Ability, AbilityConfiguration
 from autogpt.core.ability.schema import AbilityResult
 from autogpt.core.planning.simple import LanguageModelConfiguration
 from autogpt.core.plugin.simple import PluginLocation, PluginStorageFormat
 from autogpt.core.resource.model_providers import (
-    LanguageModelMessage,
-    LanguageModelProvider,
-    MessageRole,
+    ChatMessage,
+    ChatModelProvider,
     ModelProviderName,
     OpenAIModelName,
 )
+from autogpt.core.utils.json_schema import JSONSchema
 
 
 class QueryLanguageModel(Ability):
@@ -30,49 +31,33 @@ class QueryLanguageModel(Ability):
         self,
         logger: logging.Logger,
         configuration: AbilityConfiguration,
-        language_model_provider: LanguageModelProvider,
+        language_model_provider: ChatModelProvider,
     ):
         self._logger = logger
         self._configuration = configuration
         self._language_model_provider = language_model_provider
 
-    @classmethod
-    def description(cls) -> str:
-        return "Query a language model. A query should be a question and any relevant context."
+    description: ClassVar[str] = (
+        "Query a language model."
+        " A query should be a question and any relevant context."
+    )
 
-    @classmethod
-    def arguments(cls) -> dict:
-        return {
-            "query": {
-                "type": "string",
-                "description": "A query for a language model. A query should contain a question and any relevant context.",
-            },
-        }
-
-    @classmethod
-    def required_arguments(cls) -> list[str]:
-        return ["query"]
+    parameters: ClassVar[dict[str, JSONSchema]] = {
+        "query": JSONSchema(
+            type=JSONSchema.Type.STRING,
+            description="A query for a language model. A query should contain a question and any relevant context.",
+        )
+    }
 
     async def __call__(self, query: str) -> AbilityResult:
-        messages = [
-            LanguageModelMessage(
-                content=query,
-                role=MessageRole.USER,
-            ),
-        ]
-        model_response = await self._language_model_provider.create_language_completion(
-            model_prompt=messages,
+        model_response = await self._language_model_provider.create_chat_completion(
+            model_prompt=[ChatMessage.user(query)],
             functions=[],
             model_name=self._configuration.language_model_required.model_name,
-            completion_parser=self._parse_response,
         )
         return AbilityResult(
             ability_name=self.name(),
             ability_args={"query": query},
             success=True,
-            message=model_response.content["content"],
+            message=model_response.response["content"],
         )
-
-    @staticmethod
-    def _parse_response(response_content: dict) -> dict:
-        return {"content": response_content["content"]}
diff --git a/autogpts/autogpt/autogpt/core/ability/simple.py b/autogpts/autogpt/autogpt/core/ability/simple.py
index d7dd1030a..94d443e9b 100644
--- a/autogpts/autogpt/autogpt/core/ability/simple.py
+++ b/autogpts/autogpt/autogpt/core/ability/simple.py
@@ -7,7 +7,8 @@ from autogpt.core.configuration import Configurable, SystemConfiguration, System
 from autogpt.core.memory.base import Memory
 from autogpt.core.plugin.simple import SimplePluginService
 from autogpt.core.resource.model_providers import (
-    LanguageModelProvider,
+    ChatModelProvider,
+    CompletionModelFunction,
     ModelProviderName,
 )
 from autogpt.core.workspace.base import Workspace
@@ -41,7 +42,7 @@ class SimpleAbilityRegistry(AbilityRegistry, Configurable):
         logger: logging.Logger,
         memory: Memory,
         workspace: Workspace,
-        model_providers: dict[ModelProviderName, LanguageModelProvider],
+        model_providers: dict[ModelProviderName, ChatModelProvider],
     ):
         self._configuration = settings.configuration
         self._logger = logger
@@ -78,12 +79,10 @@ class SimpleAbilityRegistry(AbilityRegistry, Configurable):
         self._abilities.append(ability)
 
     def list_abilities(self) -> list[str]:
-        return [
-            f"{ability.name()}: {ability.description()}" for ability in self._abilities
-        ]
+        return [f"{ability.name()}: {ability.description}" for ability in self._abilities]
 
-    def dump_abilities(self) -> list[dict]:
-        return [ability.dump() for ability in self._abilities]
+    def dump_abilities(self) -> list[CompletionModelFunction]:
+        return [ability.spec for ability in self._abilities]
 
     def get_ability(self, ability_name: str) -> Ability:
         for ability in self._abilities:
diff --git a/autogpts/autogpt/autogpt/core/agent/simple.py b/autogpts/autogpt/autogpt/core/agent/simple.py
index de99c135e..a66ca5cc4 100644
--- a/autogpts/autogpt/autogpt/core/agent/simple.py
+++ b/autogpts/autogpt/autogpt/core/agent/simple.py
@@ -19,7 +19,11 @@ from autogpt.core.plugin.simple import (
     PluginStorageFormat,
     SimplePluginService,
 )
-from autogpt.core.resource.model_providers import OpenAIProvider, OpenAISettings
+from autogpt.core.resource.model_providers import (
+    CompletionModelFunction,
+    OpenAIProvider,
+    OpenAISettings,
+)
 from autogpt.core.workspace.simple import SimpleWorkspace, WorkspaceSettings
 
 
@@ -178,7 +182,7 @@ class SimpleAgent(Agent, Configurable):
             agent_goals=self._configuration.goals,
             abilities=self._ability_registry.list_abilities(),
         )
-        tasks = [Task.parse_obj(task) for task in plan.content["task_list"]]
+        tasks = [Task.parse_obj(task) for task in plan.parsed_result["task_list"]]
 
         # TODO: Should probably do a step to evaluate the quality of the generated tasks,
         #  and ensure that they have actionable ready and acceptance criteria
@@ -186,7 +190,7 @@ class SimpleAgent(Agent, Configurable):
         self._task_queue.extend(tasks)
         self._task_queue.sort(key=lambda t: t.priority, reverse=True)
         self._task_queue[-1].context.status = TaskStatus.READY
-        return plan.content
+        return plan.parsed_result
 
     async def determine_next_ability(self, *args, **kwargs):
         if not self._task_queue:
@@ -202,7 +206,7 @@ class SimpleAgent(Agent, Configurable):
             self._ability_registry.dump_abilities(),
         )
         self._current_task = task
-        self._next_ability = next_ability.content
+        self._next_ability = next_ability.parsed_result
         return self._current_task, self._next_ability
 
     async def execute_next_ability(self, user_input: str, *args, **kwargs):
@@ -236,7 +240,11 @@ class SimpleAgent(Agent, Configurable):
             task.context.status = TaskStatus.IN_PROGRESS
         return task
 
-    async def _choose_next_ability(self, task: Task, ability_schema: list[dict]):
+    async def _choose_next_ability(
+        self,
+        task: Task,
+        ability_specs: list[CompletionModelFunction],
+    ):
         """Choose the next ability to use for the task."""
         self._logger.debug(f"Choosing next ability for task {task}.")
         if task.context.cycle_count > self._configuration.max_task_cycle_count:
@@ -247,7 +255,7 @@ class SimpleAgent(Agent, Configurable):
             raise NotImplementedError
         else:
             next_ability = await self._planning.determine_next_ability(
-                task, ability_schema
+                task, ability_specs
             )
         return next_ability
 
@@ -328,7 +336,7 @@ class SimpleAgent(Agent, Configurable):
             user_objective,
         )
 
-        return model_response.content
+        return model_response.parsed_result
 
     @classmethod
     def provision_agent(
diff --git a/autogpts/autogpt/autogpt/core/configuration/schema.py b/autogpts/autogpt/autogpt/core/configuration/schema.py
index 4a1a4d496..4c92a994b 100644
--- a/autogpts/autogpt/autogpt/core/configuration/schema.py
+++ b/autogpts/autogpt/autogpt/core/configuration/schema.py
@@ -1,10 +1,12 @@
 import abc
+import functools
 import typing
 from typing import Any, Generic, TypeVar
 
 from pydantic import BaseModel, Field
 
 
+@functools.wraps(Field)
 def UserConfigurable(*args, **kwargs):
     return Field(*args, **kwargs, user_configurable=True)
diff --git a/autogpts/autogpt/autogpt/core/planning/__init__.py b/autogpts/autogpt/autogpt/core/planning/__init__.py
index 26c3aae35..517aa91a4 100644
--- a/autogpts/autogpt/autogpt/core/planning/__init__.py
+++ b/autogpts/autogpt/autogpt/core/planning/__init__.py
@@ -1,7 +1,3 @@
 """The planning system organizes the Agent's activities."""
-from autogpt.core.planning.schema import (
-    Task,
-    TaskStatus,
-    TaskType,
-)
+from autogpt.core.planning.schema import Task, TaskStatus, TaskType
 from autogpt.core.planning.simple import PlannerSettings, SimplePlanner
diff --git a/autogpts/autogpt/autogpt/core/planning/prompt_strategies/__init__.py b/autogpts/autogpt/autogpt/core/planning/prompt_strategies/__init__.py
index 7d8c6ee2e..789756dec 100644
--- a/autogpts/autogpt/autogpt/core/planning/prompt_strategies/__init__.py
+++ b/autogpts/autogpt/autogpt/core/planning/prompt_strategies/__init__.py
@@ -1,12 +1,3 @@
-from .initial_plan import (
-    InitialPlan,
-    InitialPlanConfiguration,
-)
-from .name_and_goals import (
-    NameAndGoals,
-    NameAndGoalsConfiguration,
-)
-from .next_ability import (
-    NextAbility,
-    NextAbilityConfiguration,
-)
+from .initial_plan import InitialPlan, InitialPlanConfiguration
+from .name_and_goals import NameAndGoals, NameAndGoalsConfiguration
+from .next_ability import NextAbility, NextAbilityConfiguration
diff --git a/autogpts/autogpt/autogpt/core/planning/prompt_strategies/initial_plan.py b/autogpts/autogpt/autogpt/core/planning/prompt_strategies/initial_plan.py
index e9afde17b..b5a45a8bb 100644
--- a/autogpts/autogpt/autogpt/core/planning/prompt_strategies/initial_plan.py
+++ b/autogpts/autogpt/autogpt/core/planning/prompt_strategies/initial_plan.py
@@ -3,16 +3,14 @@ import logging
 
 from autogpt.core.configuration import SystemConfiguration, UserConfigurable
 from autogpt.core.planning.schema import Task, TaskType
 from autogpt.core.prompting import PromptStrategy
-from autogpt.core.prompting.schema import (
-    LanguageModelClassification,
-    LanguageModelPrompt,
-)
+from autogpt.core.prompting.schema import ChatPrompt, LanguageModelClassification
 from autogpt.core.prompting.utils import json_loads, to_numbered_list
 from autogpt.core.resource.model_providers import (
-    LanguageModelFunction,
-    LanguageModelMessage,
-    MessageRole,
+    AssistantChatMessageDict,
+    ChatMessage,
+    CompletionModelFunction,
 )
+from autogpt.core.utils.json_schema import JSONSchema
 
 logger = logging.getLogger(__name__)
 
@@ -47,66 +45,56 @@ class InitialPlan(PromptStrategy):
         "You are {agent_name}, {agent_role}\n"
         "Your goals are:\n"
         "{agent_goals}"
     )
 
-    DEFAULT_CREATE_PLAN_FUNCTION = {
-        "name": "create_initial_agent_plan",
-        "description": "Creates a set of tasks that forms the initial plan for an autonomous agent.",
-        "parameters": {
-            "type": "object",
-            "properties": {
-                "task_list": {
-                    "type": "array",
-                    "items": {
-                        "type": "object",
-                        "properties": {
-                            "objective": {
-                                "type": "string",
-                                "description": "An imperative verb phrase that succinctly describes the task.",
-                            },
-                            "type": {
-                                "type": "string",
-                                "description": "A categorization for the task. ",
-                                "enum": [t.value for t in TaskType],
-                            },
-                            "acceptance_criteria": {
-                                "type": "array",
-                                "items": {
-                                    "type": "string",
-                                    "description": "A list of measurable and testable criteria that must be met for the task to be considered complete.",
-                                },
-                            },
-                            "priority": {
-                                "type": "integer",
-                                "description": "A number between 1 and 10 indicating the priority of the task relative to other generated tasks.",
-                                "minimum": 1,
-                                "maximum": 10,
-                            },
-                            "ready_criteria": {
-                                "type": "array",
-                                "items": {
-                                    "type": "string",
-                                    "description": "A list of measurable and testable criteria that must be met before the task can be started.",
-                                },
-                            },
-                        },
-                        "required": [
-                            "objective",
-                            "type",
-                            "acceptance_criteria",
-                            "priority",
-                            "ready_criteria",
-                        ],
+    DEFAULT_CREATE_PLAN_FUNCTION = CompletionModelFunction(
+        name="create_initial_agent_plan",
+        description="Creates a set of tasks that forms the initial plan for an autonomous agent.",
+        parameters={
+            "task_list": JSONSchema(
+                type=JSONSchema.Type.ARRAY,
+                items=JSONSchema(
+                    type=JSONSchema.Type.OBJECT,
+                    properties={
+                        "objective": JSONSchema(
+                            type=JSONSchema.Type.STRING,
+                            description="An imperative verb phrase that succinctly describes the task.",
+                        ),
+                        "type": JSONSchema(
+                            type=JSONSchema.Type.STRING,
+                            description="A categorization for the task.",
+                            enum=[t.value for t in TaskType],
+                        ),
+                        "acceptance_criteria": JSONSchema(
+                            type=JSONSchema.Type.ARRAY,
+                            items=JSONSchema(
+                                type=JSONSchema.Type.STRING,
+                                description="A list of measurable and testable criteria that must be met for the task to be considered complete.",
+                            ),
+                        ),
+                        "priority": JSONSchema(
+                            type=JSONSchema.Type.INTEGER,
+                            description="A number between 1 and 10 indicating the priority of the task relative to other generated tasks.",
+                            minimum=1,
+                            maximum=10,
+                        ),
+                        "ready_criteria": JSONSchema(
+                            type=JSONSchema.Type.ARRAY,
+                            items=JSONSchema(
+                                type=JSONSchema.Type.STRING,
+                                description="A list of measurable and testable criteria that must be met before the task can be started.",
+                            ),
+                        ),
                     },
-                },
-            },
+                ),
+            ),
         },
-    }
+    )
 
     default_configuration: InitialPlanConfiguration = InitialPlanConfiguration(
         model_classification=LanguageModelClassification.SMART_MODEL,
         system_prompt_template=DEFAULT_SYSTEM_PROMPT_TEMPLATE,
         system_info=DEFAULT_SYSTEM_INFO,
         user_prompt_template=DEFAULT_USER_PROMPT_TEMPLATE,
-        create_plan_function=DEFAULT_CREATE_PLAN_FUNCTION,
+        create_plan_function=DEFAULT_CREATE_PLAN_FUNCTION.schema,
     )
 
     def __init__(
@@ -121,7 +109,7 @@ class InitialPlan(PromptStrategy):
         self._system_prompt_template = system_prompt_template
         self._system_info = system_info
         self._user_prompt_template = user_prompt_template
-        self._create_plan_function = create_plan_function
+        self._create_plan_function = CompletionModelFunction.parse(create_plan_function)
 
     @property
     def model_classification(self) -> LanguageModelClassification:
@@ -137,7 +125,7 @@ class InitialPlan(PromptStrategy):
         api_budget: float,
         current_time: str,
         **kwargs,
-    ) -> LanguageModelPrompt:
+    ) -> ChatPrompt:
         template_kwargs = {
             "agent_name": agent_name,
             "agent_role": agent_role,
@@ -154,28 +142,23 @@ class InitialPlan(PromptStrategy):
             self._system_info, **template_kwargs
         )
 
-        system_prompt = LanguageModelMessage(
-            role=MessageRole.SYSTEM,
-            content=self._system_prompt_template.format(**template_kwargs),
+        system_prompt = ChatMessage.system(
+            self._system_prompt_template.format(**template_kwargs),
         )
-        user_prompt = LanguageModelMessage(
-            role=MessageRole.USER,
-            content=self._user_prompt_template.format(**template_kwargs),
-        )
-        create_plan_function = LanguageModelFunction(
-            json_schema=self._create_plan_function,
+        user_prompt = ChatMessage.user(
+            self._user_prompt_template.format(**template_kwargs),
         )
 
-        return LanguageModelPrompt(
+        return ChatPrompt(
             messages=[system_prompt, user_prompt],
-            functions=[create_plan_function],
+            functions=[self._create_plan_function],
             # TODO:
             tokens_used=0,
         )
 
     def parse_response_content(
         self,
-        response_content: dict,
+        response_content: AssistantChatMessageDict,
     ) -> dict:
         """Parse the actual text response from the objective model.
 
@@ -184,7 +167,6 @@ class InitialPlan(PromptStrategy):
 
         Returns:
             The parsed response.
- """ try: parsed_response = json_loads(response_content["function_call"]["arguments"]) diff --git a/autogpts/autogpt/autogpt/core/planning/prompt_strategies/name_and_goals.py b/autogpts/autogpt/autogpt/core/planning/prompt_strategies/name_and_goals.py index fb9f5b9b6..0cb2b557f 100644 --- a/autogpts/autogpt/autogpt/core/planning/prompt_strategies/name_and_goals.py +++ b/autogpts/autogpt/autogpt/core/planning/prompt_strategies/name_and_goals.py @@ -2,16 +2,14 @@ import logging from autogpt.core.configuration import SystemConfiguration, UserConfigurable from autogpt.core.prompting import PromptStrategy -from autogpt.core.prompting.schema import ( - LanguageModelClassification, - LanguageModelPrompt, -) +from autogpt.core.prompting.schema import ChatPrompt, LanguageModelClassification from autogpt.core.prompting.utils import json_loads from autogpt.core.resource.model_providers import ( - LanguageModelFunction, - LanguageModelMessage, - MessageRole, + AssistantChatMessageDict, + ChatMessage, + CompletionModelFunction, ) +from autogpt.core.utils.json_schema import JSONSchema logger = logging.getLogger(__name__) @@ -53,43 +51,39 @@ class NameAndGoals(PromptStrategy): DEFAULT_USER_PROMPT_TEMPLATE = '"""{user_objective}"""' - DEFAULT_CREATE_AGENT_FUNCTION = { - "name": "create_agent", - "description": ("Create a new autonomous AI agent to complete a given task."), - "parameters": { - "type": "object", - "properties": { - "agent_name": { - "type": "string", - "description": "A short role-based name for an autonomous agent.", - }, - "agent_role": { - "type": "string", - "description": "An informative one sentence description of what the AI agent does", - }, - "agent_goals": { - "type": "array", - "minItems": 1, - "maxItems": 5, - "items": { - "type": "string", - }, - "description": ( - "One to five highly effective goals that are optimally aligned with the completion of a " - "specific task. The number and complexity of the goals should correspond to the " - "complexity of the agent's primary objective." - ), - }, - }, - "required": ["agent_name", "agent_role", "agent_goals"], + DEFAULT_CREATE_AGENT_FUNCTION = CompletionModelFunction( + name="create_agent", + description="Create a new autonomous AI agent to complete a given task.", + parameters={ + "agent_name": JSONSchema( + type=JSONSchema.Type.STRING, + description="A short role-based name for an autonomous agent.", + ), + "agent_role": JSONSchema( + type=JSONSchema.Type.STRING, + description="An informative one sentence description of what the AI agent does", + ), + "agent_goals": JSONSchema( + type=JSONSchema.Type.ARRAY, + minItems=1, + maxItems=5, + items=JSONSchema( + type=JSONSchema.Type.STRING, + ), + description=( + "One to five highly effective goals that are optimally aligned with the completion of a " + "specific task. The number and complexity of the goals should correspond to the " + "complexity of the agent's primary objective." 
+ ), + ), }, - } + ) default_configuration: NameAndGoalsConfiguration = NameAndGoalsConfiguration( model_classification=LanguageModelClassification.SMART_MODEL, system_prompt=DEFAULT_SYSTEM_PROMPT, user_prompt_template=DEFAULT_USER_PROMPT_TEMPLATE, - create_agent_function=DEFAULT_CREATE_AGENT_FUNCTION, + create_agent_function=DEFAULT_CREATE_AGENT_FUNCTION.schema, ) def __init__( @@ -97,34 +91,29 @@ class NameAndGoals(PromptStrategy): model_classification: LanguageModelClassification, system_prompt: str, user_prompt_template: str, - create_agent_function: str, + create_agent_function: dict, ): self._model_classification = model_classification self._system_prompt_message = system_prompt self._user_prompt_template = user_prompt_template - self._create_agent_function = create_agent_function + self._create_agent_function = CompletionModelFunction.parse( + create_agent_function + ) @property def model_classification(self) -> LanguageModelClassification: return self._model_classification - def build_prompt(self, user_objective: str = "", **kwargs) -> LanguageModelPrompt: - system_message = LanguageModelMessage( - role=MessageRole.SYSTEM, - content=self._system_prompt_message, - ) - user_message = LanguageModelMessage( - role=MessageRole.USER, - content=self._user_prompt_template.format( + def build_prompt(self, user_objective: str = "", **kwargs) -> ChatPrompt: + system_message = ChatMessage.system(self._system_prompt_message) + user_message = ChatMessage.user( + self._user_prompt_template.format( user_objective=user_objective, - ), + ) ) - create_agent_function = LanguageModelFunction( - json_schema=self._create_agent_function, - ) - prompt = LanguageModelPrompt( + prompt = ChatPrompt( messages=[system_message, user_message], - functions=[create_agent_function], + functions=[self._create_agent_function], # TODO tokens_used=0, ) @@ -132,7 +121,7 @@ class NameAndGoals(PromptStrategy): def parse_response_content( self, - response_content: dict, + response_content: AssistantChatMessageDict, ) -> dict: """Parse the actual text response from the objective model. 
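The same migration pattern recurs in every strategy: role/content `LanguageModelMessage` pairs become `ChatMessage` factory calls, and `LanguageModelPrompt`/`LanguageModelFunction` collapse into a `ChatPrompt` that carries `CompletionModelFunction` specs directly. A condensed sketch of the new style (illustrative values only; `tokens_used=0` mirrors the TODO-marked placeholder the strategies pass):

```python
from autogpt.core.prompting.schema import ChatPrompt
from autogpt.core.resource.model_providers import ChatMessage

prompt = ChatPrompt(
    messages=[
        ChatMessage.system("You are a helpful planning assistant."),
        ChatMessage.user('"""Help me plan my week."""'),
    ],
    functions=[],  # e.g. [DEFAULT_CREATE_AGENT_FUNCTION]
    tokens_used=0,
)

# ChatPrompt.raw() (added in prompting/schema.py further down) yields the
# plain message dicts to send to the chat API.
```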
diff --git a/autogpts/autogpt/autogpt/core/planning/prompt_strategies/next_ability.py b/autogpts/autogpt/autogpt/core/planning/prompt_strategies/next_ability.py
index 646d08992..5fd0052d7 100644
--- a/autogpts/autogpt/autogpt/core/planning/prompt_strategies/next_ability.py
+++ b/autogpts/autogpt/autogpt/core/planning/prompt_strategies/next_ability.py
@@ -1,18 +1,16 @@
 import logging
 
 from autogpt.core.configuration import SystemConfiguration, UserConfigurable
-from autogpt.core.prompting import PromptStrategy
-from autogpt.core.prompting.schema import (
-    LanguageModelClassification,
-    LanguageModelPrompt,
-)
-from autogpt.core.prompting.utils import json_loads, to_numbered_list
 from autogpt.core.planning.schema import Task
+from autogpt.core.prompting import PromptStrategy
+from autogpt.core.prompting.schema import ChatPrompt, LanguageModelClassification
+from autogpt.core.prompting.utils import json_loads, to_numbered_list
 from autogpt.core.resource.model_providers import (
-    LanguageModelFunction,
-    LanguageModelMessage,
-    MessageRole,
+    AssistantChatMessageDict,
+    ChatMessage,
+    CompletionModelFunction,
 )
+from autogpt.core.utils.json_schema import JSONSchema
 
 logger = logging.getLogger(__name__)
 
@@ -51,18 +49,18 @@ class NextAbility(PromptStrategy):
     )
 
     DEFAULT_ADDITIONAL_ABILITY_ARGUMENTS = {
-        "motivation": {
-            "type": "string",
-            "description": "Your justification for choosing choosing this function instead of a different one.",
-        },
-        "self_criticism": {
-            "type": "string",
-            "description": "Thoughtful self-criticism that explains why this function may not be the best choice.",
-        },
-        "reasoning": {
-            "type": "string",
-            "description": "Your reasoning for choosing this function taking into account the `motivation` and weighing the `self_criticism`.",
-        },
+        "motivation": JSONSchema(
+            type=JSONSchema.Type.STRING,
+            description="Your justification for choosing this function instead of a different one.",
+        ),
+        "self_criticism": JSONSchema(
+            type=JSONSchema.Type.STRING,
+            description="Thoughtful self-criticism that explains why this function may not be the best choice.",
+        ),
+        "reasoning": JSONSchema(
+            type=JSONSchema.Type.STRING,
+            description="Your reasoning for choosing this function taking into account the `motivation` and weighing the `self_criticism`.",
+        ),
     }
 
     default_configuration: NextAbilityConfiguration = NextAbilityConfiguration(
@@ -70,7 +68,9 @@ class NextAbility(PromptStrategy):
         system_prompt_template=DEFAULT_SYSTEM_PROMPT_TEMPLATE,
         system_info=DEFAULT_SYSTEM_INFO,
         user_prompt_template=DEFAULT_USER_PROMPT_TEMPLATE,
-        additional_ability_arguments=DEFAULT_ADDITIONAL_ABILITY_ARGUMENTS,
+        additional_ability_arguments={
+            k: v.dump() for k, v in DEFAULT_ADDITIONAL_ABILITY_ARGUMENTS.items()
+        },
     )
 
     def __init__(
@@ -85,7 +85,11 @@ class NextAbility(PromptStrategy):
         self._system_prompt_template = system_prompt_template
         self._system_info = system_info
         self._user_prompt_template = user_prompt_template
-        self._additional_ability_arguments = additional_ability_arguments
+        self._additional_ability_arguments = JSONSchema.parse_properties(
+            additional_ability_arguments
+        )
+        for p in self._additional_ability_arguments.values():
+            p.required = True
 
     @property
     def model_classification(self) -> LanguageModelClassification:
@@ -94,12 +98,12 @@ class NextAbility(PromptStrategy):
     def build_prompt(
         self,
         task: Task,
-        ability_schema: list[dict],
+        ability_specs: list[CompletionModelFunction],
         os_info: str,
         api_budget: float,
         current_time: str,
         **kwargs,
-    ) -> LanguageModelPrompt:
+    ) -> ChatPrompt:
         template_kwargs = {
             "os_info": os_info,
             "api_budget": api_budget,
@@ -107,13 +111,8 @@ class NextAbility(PromptStrategy):
             **kwargs,
         }
 
-        for ability in ability_schema:
-            ability["parameters"]["properties"].update(
-                self._additional_ability_arguments
-            )
-            ability["parameters"]["required"] += list(
-                self._additional_ability_arguments.keys()
-            )
+        for ability in ability_specs:
+            ability.parameters.update(self._additional_ability_arguments)
 
         template_kwargs["task_objective"] = task.objective
         template_kwargs["cycle_count"] = task.context.cycle_count
@@ -143,28 +142,23 @@ class NextAbility(PromptStrategy):
             **template_kwargs,
         )
 
-        system_prompt = LanguageModelMessage(
-            role=MessageRole.SYSTEM,
-            content=self._system_prompt_template.format(**template_kwargs),
+        system_prompt = ChatMessage.system(
+            self._system_prompt_template.format(**template_kwargs)
         )
-        user_prompt = LanguageModelMessage(
-            role=MessageRole.USER,
-            content=self._user_prompt_template.format(**template_kwargs),
+        user_prompt = ChatMessage.user(
+            self._user_prompt_template.format(**template_kwargs)
         )
-        functions = [
-            LanguageModelFunction(json_schema=ability) for ability in ability_schema
-        ]
 
-        return LanguageModelPrompt(
+        return ChatPrompt(
             messages=[system_prompt, user_prompt],
-            functions=functions,
+            functions=ability_specs,
             # TODO:
             tokens_used=0,
         )
 
     def parse_response_content(
         self,
-        response_content: dict,
+        response_content: AssistantChatMessageDict,
     ) -> dict:
         """Parse the actual text response from the objective model.
 
@@ -177,7 +171,9 @@ class NextAbility(PromptStrategy):
         """
         try:
             function_name = response_content["function_call"]["name"]
-            function_arguments = json_loads(response_content["function_call"]["arguments"])
+            function_arguments = json_loads(
+                response_content["function_call"]["arguments"]
+            )
             parsed_response = {
                 "motivation": function_arguments.pop("motivation"),
                 "self_criticism": function_arguments.pop("self_criticism"),
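Downstream, `create_language_completion` becomes `create_chat_completion`, which accepts the `CompletionModelFunction` specs straight off the prompt. A hedged sketch of the call shape shared by `SimplePlanner.chat_with_model` (below) and `QueryLanguageModel` (above); the helper name and the `model_name` value are assumptions for illustration:

```python
async def run_strategy(provider, strategy, **kwargs):
    prompt = strategy.build_prompt(**kwargs)
    return await provider.create_chat_completion(
        model_prompt=prompt.messages,
        functions=prompt.functions,
        model_name="gpt-4",  # illustrative; the planner reads this from its model configuration
        completion_parser=strategy.parse_response_content,
    )

# Per the usages in this diff, the returned ChatModelResponse exposes the raw
# assistant message as .response and the parser's output as .parsed_result.
```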
diff --git a/autogpts/autogpt/autogpt/core/planning/simple.py b/autogpts/autogpt/autogpt/core/planning/simple.py
index 9fe5b8ec1..7306a3ab4 100644
--- a/autogpts/autogpt/autogpt/core/planning/simple.py
+++ b/autogpts/autogpt/autogpt/core/planning/simple.py
@@ -15,8 +15,9 @@ from autogpt.core.planning.schema import Task
 from autogpt.core.prompting import PromptStrategy
 from autogpt.core.prompting.schema import LanguageModelClassification
 from autogpt.core.resource.model_providers import (
-    LanguageModelProvider,
-    LanguageModelResponse,
+    ChatModelProvider,
+    ChatModelResponse,
+    CompletionModelFunction,
     ModelProviderName,
     OpenAIModelName,
 )
@@ -82,14 +83,14 @@ class SimplePlanner(Configurable):
         self,
         settings: PlannerSettings,
         logger: logging.Logger,
-        model_providers: dict[ModelProviderName, LanguageModelProvider],
+        model_providers: dict[ModelProviderName, ChatModelProvider],
         workspace: Workspace = None,  # Workspace is not available during bootstrapping.
     ) -> None:
         self._configuration = settings.configuration
         self._logger = logger
         self._workspace = workspace
 
-        self._providers: dict[LanguageModelClassification, LanguageModelProvider] = {}
+        self._providers: dict[LanguageModelClassification, ChatModelProvider] = {}
         for model, model_config in self._configuration.models.items():
             self._providers[model] = model_providers[model_config.provider_name]
 
@@ -105,7 +106,7 @@ class SimplePlanner(Configurable):
             ),
         }
 
-    async def decide_name_and_goals(self, user_objective: str) -> LanguageModelResponse:
+    async def decide_name_and_goals(self, user_objective: str) -> ChatModelResponse:
         return await self.chat_with_model(
             self._prompt_strategies["name_and_goals"],
             user_objective=user_objective,
@@ -117,7 +118,7 @@ class SimplePlanner(Configurable):
         agent_role: str,
         agent_goals: list[str],
         abilities: list[str],
-    ) -> LanguageModelResponse:
+    ) -> ChatModelResponse:
         return await self.chat_with_model(
             self._prompt_strategies["initial_plan"],
             agent_name=agent_name,
@@ -129,19 +130,19 @@ class SimplePlanner(Configurable):
     async def determine_next_ability(
         self,
         task: Task,
-        ability_schema: list[dict],
+        ability_specs: list[CompletionModelFunction],
     ):
         return await self.chat_with_model(
             self._prompt_strategies["next_ability"],
             task=task,
-            ability_schema=ability_schema,
+            ability_specs=ability_specs,
         )
 
     async def chat_with_model(
         self,
         prompt_strategy: PromptStrategy,
         **kwargs,
-    ) -> LanguageModelResponse:
+    ) -> ChatModelResponse:
         model_classification = prompt_strategy.model_classification
         model_configuration = self._configuration.models[model_classification].dict()
         self._logger.debug(f"Using model configuration: {model_configuration}")
@@ -153,13 +154,13 @@ class SimplePlanner(Configurable):
         prompt = prompt_strategy.build_prompt(**template_kwargs)
 
         self._logger.debug(f"Using prompt:\n{dump_prompt(prompt)}\n")
-        response = await provider.create_language_completion(
+        response = await provider.create_chat_completion(
             model_prompt=prompt.messages,
             functions=prompt.functions,
             **model_configuration,
             completion_parser=prompt_strategy.parse_response_content,
         )
-        return LanguageModelResponse.parse_obj(response.dict())
+        return response
 
     def _make_template_kwargs_for_strategy(self, strategy: PromptStrategy):
         provider = self._providers[strategy.model_classification]
diff --git a/autogpts/autogpt/autogpt/core/plugin/base.py b/autogpts/autogpt/autogpt/core/plugin/base.py
index bbd99ad8e..4823dd260 100644
--- a/autogpts/autogpt/autogpt/core/plugin/base.py
+++ b/autogpts/autogpt/autogpt/core/plugin/base.py
@@ -10,15 +10,15 @@ if TYPE_CHECKING:
     from autogpt.core.ability import Ability, AbilityRegistry
     from autogpt.core.memory import Memory
     from autogpt.core.resource.model_providers import (
+        ChatModelProvider,
         EmbeddingModelProvider,
-        LanguageModelProvider,
     )
 
 # Expand to other types as needed
 PluginType = (
     Type[Ability]  # Swappable now
     | Type[AbilityRegistry]  # Swappable maybe never
-    | Type[LanguageModelProvider]  # Swappable soon
+    | Type[ChatModelProvider]  # Swappable soon
     | Type[EmbeddingModelProvider]  # Swappable soon
     | Type[Memory]  # Swappable now
     # | Type[Planner]  # Swappable soon
+name = "jsonschema" +version = "4.19.1" +description = "An implementation of JSON Schema validation for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "jsonschema-4.19.1-py3-none-any.whl", hash = "sha256:cd5f1f9ed9444e554b38ba003af06c0a8c2868131e56bfbef0550fb450c0330e"}, + {file = "jsonschema-4.19.1.tar.gz", hash = "sha256:ec84cc37cfa703ef7cd4928db24f9cb31428a5d0fa77747b8b51a847458e0bbf"}, +] + +[package.dependencies] +attrs = ">=22.2.0" +jsonschema-specifications = ">=2023.03.6" +referencing = ">=0.28.4" +rpds-py = ">=0.7.1" + +[package.extras] +format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"] +format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"] + +[[package]] +name = "jsonschema-specifications" +version = "2023.7.1" +description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry" +optional = false +python-versions = ">=3.8" +files = [ + {file = "jsonschema_specifications-2023.7.1-py3-none-any.whl", hash = "sha256:05adf340b659828a004220a9613be00fa3f223f2b82002e273dee62fd50524b1"}, + {file = "jsonschema_specifications-2023.7.1.tar.gz", hash = "sha256:c91a50404e88a1f6ba40636778e2ee08f6e24c5613fe4c53ac24578a5a7f72bb"}, +] + +[package.dependencies] +referencing = ">=0.28.0" + [[package]] name = "multidict" version = "6.0.4" @@ -832,6 +867,21 @@ files = [ {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, ] +[[package]] +name = "referencing" +version = "0.30.2" +description = "JSON Referencing + Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "referencing-0.30.2-py3-none-any.whl", hash = "sha256:449b6669b6121a9e96a7f9e410b245d471e8d48964c67113ce9afe50c8dd7bdf"}, + {file = "referencing-0.30.2.tar.gz", hash = "sha256:794ad8003c65938edcdbc027f1933215e0d0ccc0291e3ce20a4d87432b59efc0"}, +] + +[package.dependencies] +attrs = ">=22.2.0" +rpds-py = ">=0.7.0" + [[package]] name = "regex" version = "2023.8.8" @@ -950,6 +1000,112 @@ urllib3 = ">=1.21.1,<3" socks = ["PySocks (>=1.5.6,!=1.5.7)"] use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] +[[package]] +name = "rpds-py" +version = "0.10.3" +description = "Python bindings to Rust's persistent data structures (rpds)" +optional = false +python-versions = ">=3.8" +files = [ + {file = "rpds_py-0.10.3-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:485747ee62da83366a44fbba963c5fe017860ad408ccd6cd99aa66ea80d32b2e"}, + {file = "rpds_py-0.10.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c55f9821f88e8bee4b7a72c82cfb5ecd22b6aad04033334f33c329b29bfa4da0"}, + {file = "rpds_py-0.10.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3b52a67ac66a3a64a7e710ba629f62d1e26ca0504c29ee8cbd99b97df7079a8"}, + {file = "rpds_py-0.10.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3aed39db2f0ace76faa94f465d4234aac72e2f32b009f15da6492a561b3bbebd"}, + {file = "rpds_py-0.10.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:271c360fdc464fe6a75f13ea0c08ddf71a321f4c55fc20a3fe62ea3ef09df7d9"}, + {file = "rpds_py-0.10.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ef5fddfb264e89c435be4adb3953cef5d2936fdeb4463b4161a6ba2f22e7b740"}, + {file = "rpds_py-0.10.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:a771417c9c06c56c9d53d11a5b084d1de75de82978e23c544270ab25e7c066ff"}, + {file = "rpds_py-0.10.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:52b5cbc0469328e58180021138207e6ec91d7ca2e037d3549cc9e34e2187330a"}, + {file = "rpds_py-0.10.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:6ac3fefb0d168c7c6cab24fdfc80ec62cd2b4dfd9e65b84bdceb1cb01d385c33"}, + {file = "rpds_py-0.10.3-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:8d54bbdf5d56e2c8cf81a1857250f3ea132de77af543d0ba5dce667183b61fec"}, + {file = "rpds_py-0.10.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cd2163f42868865597d89399a01aa33b7594ce8e2c4a28503127c81a2f17784e"}, + {file = "rpds_py-0.10.3-cp310-none-win32.whl", hash = "sha256:ea93163472db26ac6043e8f7f93a05d9b59e0505c760da2a3cd22c7dd7111391"}, + {file = "rpds_py-0.10.3-cp310-none-win_amd64.whl", hash = "sha256:7cd020b1fb41e3ab7716d4d2c3972d4588fdfbab9bfbbb64acc7078eccef8860"}, + {file = "rpds_py-0.10.3-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:1d9b5ee46dcb498fa3e46d4dfabcb531e1f2e76b477e0d99ef114f17bbd38453"}, + {file = "rpds_py-0.10.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:563646d74a4b4456d0cf3b714ca522e725243c603e8254ad85c3b59b7c0c4bf0"}, + {file = "rpds_py-0.10.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e626b864725680cd3904414d72e7b0bd81c0e5b2b53a5b30b4273034253bb41f"}, + {file = "rpds_py-0.10.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:485301ee56ce87a51ccb182a4b180d852c5cb2b3cb3a82f7d4714b4141119d8c"}, + {file = "rpds_py-0.10.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:42f712b4668831c0cd85e0a5b5a308700fe068e37dcd24c0062904c4e372b093"}, + {file = "rpds_py-0.10.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6c9141af27a4e5819d74d67d227d5047a20fa3c7d4d9df43037a955b4c748ec5"}, + {file = "rpds_py-0.10.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ef750a20de1b65657a1425f77c525b0183eac63fe7b8f5ac0dd16f3668d3e64f"}, + {file = "rpds_py-0.10.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e1a0ffc39f51aa5f5c22114a8f1906b3c17eba68c5babb86c5f77d8b1bba14d1"}, + {file = "rpds_py-0.10.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:f4c179a7aeae10ddf44c6bac87938134c1379c49c884529f090f9bf05566c836"}, + {file = "rpds_py-0.10.3-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:176287bb998fd1e9846a9b666e240e58f8d3373e3bf87e7642f15af5405187b8"}, + {file = "rpds_py-0.10.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6446002739ca29249f0beaaf067fcbc2b5aab4bc7ee8fb941bd194947ce19aff"}, + {file = "rpds_py-0.10.3-cp311-none-win32.whl", hash = "sha256:c7aed97f2e676561416c927b063802c8a6285e9b55e1b83213dfd99a8f4f9e48"}, + {file = "rpds_py-0.10.3-cp311-none-win_amd64.whl", hash = "sha256:8bd01ff4032abaed03f2db702fa9a61078bee37add0bd884a6190b05e63b028c"}, + {file = "rpds_py-0.10.3-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:4cf0855a842c5b5c391dd32ca273b09e86abf8367572073bd1edfc52bc44446b"}, + {file = "rpds_py-0.10.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:69b857a7d8bd4f5d6e0db4086da8c46309a26e8cefdfc778c0c5cc17d4b11e08"}, + {file = "rpds_py-0.10.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:975382d9aa90dc59253d6a83a5ca72e07f4ada3ae3d6c0575ced513db322b8ec"}, + {file = "rpds_py-0.10.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:35fbd23c1c8732cde7a94abe7fb071ec173c2f58c0bd0d7e5b669fdfc80a2c7b"}, + {file = "rpds_py-0.10.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:106af1653007cc569d5fbb5f08c6648a49fe4de74c2df814e234e282ebc06957"}, + {file = "rpds_py-0.10.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ce5e7504db95b76fc89055c7f41e367eaadef5b1d059e27e1d6eabf2b55ca314"}, + {file = "rpds_py-0.10.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5aca759ada6b1967fcfd4336dcf460d02a8a23e6abe06e90ea7881e5c22c4de6"}, + {file = "rpds_py-0.10.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b5d4bdd697195f3876d134101c40c7d06d46c6ab25159ed5cbd44105c715278a"}, + {file = "rpds_py-0.10.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a657250807b6efd19b28f5922520ae002a54cb43c2401e6f3d0230c352564d25"}, + {file = "rpds_py-0.10.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:177c9dd834cdf4dc39c27436ade6fdf9fe81484758885f2d616d5d03c0a83bd2"}, + {file = "rpds_py-0.10.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e22491d25f97199fc3581ad8dd8ce198d8c8fdb8dae80dea3512e1ce6d5fa99f"}, + {file = "rpds_py-0.10.3-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:2f3e1867dd574014253b4b8f01ba443b9c914e61d45f3674e452a915d6e929a3"}, + {file = "rpds_py-0.10.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c22211c165166de6683de8136229721f3d5c8606cc2c3d1562da9a3a5058049c"}, + {file = "rpds_py-0.10.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40bc802a696887b14c002edd43c18082cb7b6f9ee8b838239b03b56574d97f71"}, + {file = "rpds_py-0.10.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e271dd97c7bb8eefda5cca38cd0b0373a1fea50f71e8071376b46968582af9b"}, + {file = "rpds_py-0.10.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:95cde244e7195b2c07ec9b73fa4c5026d4a27233451485caa1cd0c1b55f26dbd"}, + {file = "rpds_py-0.10.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08a80cf4884920863623a9ee9a285ee04cef57ebedc1cc87b3e3e0f24c8acfe5"}, + {file = "rpds_py-0.10.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:763ad59e105fca09705d9f9b29ecffb95ecdc3b0363be3bb56081b2c6de7977a"}, + {file = "rpds_py-0.10.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:187700668c018a7e76e89424b7c1042f317c8df9161f00c0c903c82b0a8cac5c"}, + {file = "rpds_py-0.10.3-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:5267cfda873ad62591b9332fd9472d2409f7cf02a34a9c9cb367e2c0255994bf"}, + {file = "rpds_py-0.10.3-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:2ed83d53a8c5902ec48b90b2ac045e28e1698c0bea9441af9409fc844dc79496"}, + {file = "rpds_py-0.10.3-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:255f1a10ae39b52122cce26ce0781f7a616f502feecce9e616976f6a87992d6b"}, + {file = "rpds_py-0.10.3-cp38-none-win32.whl", hash = "sha256:a019a344312d0b1f429c00d49c3be62fa273d4a1094e1b224f403716b6d03be1"}, + {file = "rpds_py-0.10.3-cp38-none-win_amd64.whl", hash = "sha256:efb9ece97e696bb56e31166a9dd7919f8f0c6b31967b454718c6509f29ef6fee"}, + {file = "rpds_py-0.10.3-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:570cc326e78ff23dec7f41487aa9c3dffd02e5ee9ab43a8f6ccc3df8f9327623"}, + {file = "rpds_py-0.10.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cff7351c251c7546407827b6a37bcef6416304fc54d12d44dbfecbb717064717"}, + {file = 
"rpds_py-0.10.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:177914f81f66c86c012311f8c7f46887ec375cfcfd2a2f28233a3053ac93a569"}, + {file = "rpds_py-0.10.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:448a66b8266de0b581246ca7cd6a73b8d98d15100fb7165974535fa3b577340e"}, + {file = "rpds_py-0.10.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3bbac1953c17252f9cc675bb19372444aadf0179b5df575ac4b56faaec9f6294"}, + {file = "rpds_py-0.10.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9dd9d9d9e898b9d30683bdd2b6c1849449158647d1049a125879cb397ee9cd12"}, + {file = "rpds_py-0.10.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8c71ea77536149e36c4c784f6d420ffd20bea041e3ba21ed021cb40ce58e2c9"}, + {file = "rpds_py-0.10.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:16a472300bc6c83fe4c2072cc22b3972f90d718d56f241adabc7ae509f53f154"}, + {file = "rpds_py-0.10.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:b9255e7165083de7c1d605e818025e8860636348f34a79d84ec533546064f07e"}, + {file = "rpds_py-0.10.3-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:53d7a3cd46cdc1689296348cb05ffd4f4280035770aee0c8ead3bbd4d6529acc"}, + {file = "rpds_py-0.10.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:22da15b902f9f8e267020d1c8bcfc4831ca646fecb60254f7bc71763569f56b1"}, + {file = "rpds_py-0.10.3-cp39-none-win32.whl", hash = "sha256:850c272e0e0d1a5c5d73b1b7871b0a7c2446b304cec55ccdb3eaac0d792bb065"}, + {file = "rpds_py-0.10.3-cp39-none-win_amd64.whl", hash = "sha256:de61e424062173b4f70eec07e12469edde7e17fa180019a2a0d75c13a5c5dc57"}, + {file = "rpds_py-0.10.3-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:af247fd4f12cca4129c1b82090244ea5a9d5bb089e9a82feb5a2f7c6a9fe181d"}, + {file = "rpds_py-0.10.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:3ad59efe24a4d54c2742929001f2d02803aafc15d6d781c21379e3f7f66ec842"}, + {file = "rpds_py-0.10.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:642ed0a209ced4be3a46f8cb094f2d76f1f479e2a1ceca6de6346a096cd3409d"}, + {file = "rpds_py-0.10.3-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:37d0c59548ae56fae01c14998918d04ee0d5d3277363c10208eef8c4e2b68ed6"}, + {file = "rpds_py-0.10.3-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aad6ed9e70ddfb34d849b761fb243be58c735be6a9265b9060d6ddb77751e3e8"}, + {file = "rpds_py-0.10.3-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8f94fdd756ba1f79f988855d948ae0bad9ddf44df296770d9a58c774cfbcca72"}, + {file = "rpds_py-0.10.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77076bdc8776a2b029e1e6ffbe6d7056e35f56f5e80d9dc0bad26ad4a024a762"}, + {file = "rpds_py-0.10.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:87d9b206b1bd7a0523375dc2020a6ce88bca5330682ae2fe25e86fd5d45cea9c"}, + {file = "rpds_py-0.10.3-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:8efaeb08ede95066da3a3e3c420fcc0a21693fcd0c4396d0585b019613d28515"}, + {file = "rpds_py-0.10.3-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:a4d9bfda3f84fc563868fe25ca160c8ff0e69bc4443c5647f960d59400ce6557"}, + {file = "rpds_py-0.10.3-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:d27aa6bbc1f33be920bb7adbb95581452cdf23005d5611b29a12bb6a3468cc95"}, + {file = 
"rpds_py-0.10.3-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:ed8313809571a5463fd7db43aaca68ecb43ca7a58f5b23b6e6c6c5d02bdc7882"}, + {file = "rpds_py-0.10.3-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:e10e6a1ed2b8661201e79dff5531f8ad4cdd83548a0f81c95cf79b3184b20c33"}, + {file = "rpds_py-0.10.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:015de2ce2af1586ff5dc873e804434185199a15f7d96920ce67e50604592cae9"}, + {file = "rpds_py-0.10.3-pp38-pypy38_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ae87137951bb3dc08c7d8bfb8988d8c119f3230731b08a71146e84aaa919a7a9"}, + {file = "rpds_py-0.10.3-pp38-pypy38_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0bb4f48bd0dd18eebe826395e6a48b7331291078a879295bae4e5d053be50d4c"}, + {file = "rpds_py-0.10.3-pp38-pypy38_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:09362f86ec201288d5687d1dc476b07bf39c08478cde837cb710b302864e7ec9"}, + {file = "rpds_py-0.10.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:821392559d37759caa67d622d0d2994c7a3f2fb29274948ac799d496d92bca73"}, + {file = "rpds_py-0.10.3-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7170cbde4070dc3c77dec82abf86f3b210633d4f89550fa0ad2d4b549a05572a"}, + {file = "rpds_py-0.10.3-pp38-pypy38_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:5de11c041486681ce854c814844f4ce3282b6ea1656faae19208ebe09d31c5b8"}, + {file = "rpds_py-0.10.3-pp38-pypy38_pp73-musllinux_1_2_i686.whl", hash = "sha256:4ed172d0c79f156c1b954e99c03bc2e3033c17efce8dd1a7c781bc4d5793dfac"}, + {file = "rpds_py-0.10.3-pp38-pypy38_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:11fdd1192240dda8d6c5d18a06146e9045cb7e3ba7c06de6973000ff035df7c6"}, + {file = "rpds_py-0.10.3-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:f602881d80ee4228a2355c68da6b296a296cd22bbb91e5418d54577bbf17fa7c"}, + {file = "rpds_py-0.10.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:691d50c99a937709ac4c4cd570d959a006bd6a6d970a484c84cc99543d4a5bbb"}, + {file = "rpds_py-0.10.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:24cd91a03543a0f8d09cb18d1cb27df80a84b5553d2bd94cba5979ef6af5c6e7"}, + {file = "rpds_py-0.10.3-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fc2200e79d75b5238c8d69f6a30f8284290c777039d331e7340b6c17cad24a5a"}, + {file = "rpds_py-0.10.3-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ea65b59882d5fa8c74a23f8960db579e5e341534934f43f3b18ec1839b893e41"}, + {file = "rpds_py-0.10.3-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:829e91f3a8574888b73e7a3feb3b1af698e717513597e23136ff4eba0bc8387a"}, + {file = "rpds_py-0.10.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eab75a8569a095f2ad470b342f2751d9902f7944704f0571c8af46bede438475"}, + {file = "rpds_py-0.10.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:061c3ff1f51ecec256e916cf71cc01f9975af8fb3af9b94d3c0cc8702cfea637"}, + {file = "rpds_py-0.10.3-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:39d05e65f23a0fe897b6ac395f2a8d48c56ac0f583f5d663e0afec1da89b95da"}, + {file = "rpds_py-0.10.3-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:4eca20917a06d2fca7628ef3c8b94a8c358f6b43f1a621c9815243462dcccf97"}, + {file = "rpds_py-0.10.3-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = 
"sha256:e8d0f0eca087630d58b8c662085529781fd5dc80f0a54eda42d5c9029f812599"}, + {file = "rpds_py-0.10.3.tar.gz", hash = "sha256:fcc1ebb7561a3e24a6588f7c6ded15d80aec22c66a070c757559b57b17ffd1cb"}, +] + [[package]] name = "sniffio" version = "1.3.0" @@ -1056,24 +1212,24 @@ telegram = ["requests"] [[package]] name = "typing-extensions" -version = "4.7.1" -description = "Backported and Experimental Type Hints for Python 3.7+" +version = "4.8.0" +description = "Backported and Experimental Type Hints for Python 3.8+" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "typing_extensions-4.7.1-py3-none-any.whl", hash = "sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36"}, - {file = "typing_extensions-4.7.1.tar.gz", hash = "sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2"}, + {file = "typing_extensions-4.8.0-py3-none-any.whl", hash = "sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0"}, + {file = "typing_extensions-4.8.0.tar.gz", hash = "sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef"}, ] [[package]] name = "urllib3" -version = "2.0.4" +version = "2.0.5" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false python-versions = ">=3.7" files = [ - {file = "urllib3-2.0.4-py3-none-any.whl", hash = "sha256:de7df1803967d2c2a98e4b11bb7d6bd9210474c46e8a0401514e3a42a75ebde4"}, - {file = "urllib3-2.0.4.tar.gz", hash = "sha256:8d22f86aae8ef5e410d4f539fde9ce6b2113a001bb4d189e0aed70642d602b11"}, + {file = "urllib3-2.0.5-py3-none-any.whl", hash = "sha256:ef16afa8ba34a1f989db38e1dbbe0c302e4289a47856990d0682e374563ce35e"}, + {file = "urllib3-2.0.5.tar.gz", hash = "sha256:13abf37382ea2ce6fb744d4dad67838eec857c9f4f57009891805e0b5e123594"}, ] [package.extras] @@ -1186,4 +1342,4 @@ multidict = ">=4.0" [metadata] lock-version = "2.0" python-versions = "^3.10" -content-hash = "50ad53581d2716ee6927df7200b2522acfaad35aadc76909bdab4073c49a824e" +content-hash = "e5acc4decd67692ad0f08e38d380e1a474ef480449b78dd14321dccf1ad3ca5a" diff --git a/autogpts/autogpt/autogpt/core/prompting/__init__.py b/autogpts/autogpt/autogpt/core/prompting/__init__.py index 53804a9b6..305c35685 100644 --- a/autogpts/autogpt/autogpt/core/prompting/__init__.py +++ b/autogpts/autogpt/autogpt/core/prompting/__init__.py @@ -1,8 +1,8 @@ from .base import PromptStrategy -from .schema import LanguageModelClassification, LanguageModelPrompt +from .schema import ChatPrompt, LanguageModelClassification __all__ = [ "LanguageModelClassification", - "LanguageModelPrompt", + "ChatPrompt", "PromptStrategy", ] diff --git a/autogpts/autogpt/autogpt/core/prompting/base.py b/autogpts/autogpt/autogpt/core/prompting/base.py index 013f80b54..26f56a3e5 100644 --- a/autogpts/autogpt/autogpt/core/prompting/base.py +++ b/autogpts/autogpt/autogpt/core/prompting/base.py @@ -2,12 +2,9 @@ import abc from typing import Generic, TypeVar from autogpt.core.configuration import SystemConfiguration +from autogpt.core.resource.model_providers import AssistantChatMessageDict -from .schema import ( - LanguageModelClassification, - LanguageModelPrompt, -) - +from .schema import ChatPrompt, LanguageModelClassification IN = TypeVar("IN", bound=dict) OUT = TypeVar("OUT") @@ -22,9 +19,9 @@ class PromptStrategy(abc.ABC, Generic[IN, OUT]): ... @abc.abstractmethod - def build_prompt(self, *_, **kwargs: IN) -> LanguageModelPrompt: + def build_prompt(self, *_, **kwargs: IN) -> ChatPrompt: ... 
@abc.abstractmethod - def parse_response_content(self, response_content: dict) -> OUT: + def parse_response_content(self, response_content: AssistantChatMessageDict) -> OUT: ... diff --git a/autogpts/autogpt/autogpt/core/prompting/schema.py b/autogpts/autogpt/autogpt/core/prompting/schema.py index 67e952138..45efc40fe 100644 --- a/autogpts/autogpt/autogpt/core/prompting/schema.py +++ b/autogpts/autogpt/autogpt/core/prompting/schema.py @@ -3,8 +3,9 @@ import enum from pydantic import BaseModel, Field from autogpt.core.resource.model_providers.schema import ( - LanguageModelFunction, - LanguageModelMessage, + ChatMessage, + ChatMessageDict, + CompletionModelFunction, ) @@ -20,12 +21,14 @@ class LanguageModelClassification(str, enum.Enum): SMART_MODEL = "smart_model" -class LanguageModelPrompt(BaseModel): - messages: list[LanguageModelMessage] - functions: list[LanguageModelFunction] = Field(default_factory=list) +class ChatPrompt(BaseModel): + messages: list[ChatMessage] + functions: list[CompletionModelFunction] = Field(default_factory=list) + + def raw(self) -> list[ChatMessageDict]: + return [m.dict() for m in self.messages] def __str__(self): return "\n\n".join( - f"{m.role.value.upper()}: {m.content}" - for m in self.messages + f"{m.role.value.upper()}: {m.content}" for m in self.messages ) diff --git a/autogpts/autogpt/autogpt/core/pyproject.toml b/autogpts/autogpt/autogpt/core/pyproject.toml index e9f252d2d..60047ca1c 100644 --- a/autogpts/autogpt/autogpt/core/pyproject.toml +++ b/autogpts/autogpt/autogpt/core/pyproject.toml @@ -25,6 +25,7 @@ click = "^8.1.7" colorama = "^0.4.6" distro = "^1.8.0" inflection = "^0.5.1" +jsonschema = "^4.19.1" openai = "^0.28.0" pydantic = "^1.10.12" pyyaml = "^6.0.0" diff --git a/autogpts/autogpt/autogpt/core/resource/model_providers/__init__.py b/autogpts/autogpt/autogpt/core/resource/model_providers/__init__.py index 8b87efc82..b896760d2 100644 --- a/autogpts/autogpt/autogpt/core/resource/model_providers/__init__.py +++ b/autogpts/autogpt/autogpt/core/resource/model_providers/__init__.py @@ -1,20 +1,25 @@ -from autogpt.core.resource.model_providers.openai import ( +from .openai import ( + OPEN_AI_CHAT_MODELS, + OPEN_AI_EMBEDDING_MODELS, OPEN_AI_MODELS, OpenAIModelName, OpenAIProvider, OpenAISettings, ) -from autogpt.core.resource.model_providers.schema import ( +from .schema import ( + AssistantChatMessage, + AssistantChatMessageDict, + AssistantFunctionCall, + AssistantFunctionCallDict, + ChatMessage, + ChatModelInfo, + ChatModelProvider, + ChatModelResponse, + CompletionModelFunction, Embedding, EmbeddingModelInfo, EmbeddingModelProvider, EmbeddingModelResponse, - LanguageModelFunction, - LanguageModelInfo, - LanguageModelMessage, - LanguageModelProvider, - LanguageModelResponse, - MessageRole, ModelInfo, ModelProvider, ModelProviderBudget, @@ -24,19 +29,23 @@ from autogpt.core.resource.model_providers.schema import ( ModelProviderSettings, ModelProviderUsage, ModelResponse, + ModelTokenizer, ) __all__ = [ + "AssistantChatMessage", + "AssistantChatMessageDict", + "AssistantFunctionCall", + "AssistantFunctionCallDict", + "ChatMessage", + "ChatModelInfo", + "ChatModelProvider", + "ChatModelResponse", + "CompletionModelFunction", "Embedding", "EmbeddingModelInfo", "EmbeddingModelProvider", "EmbeddingModelResponse", - "LanguageModelFunction", - "LanguageModelInfo", - "LanguageModelMessage", - "LanguageModelProvider", - "LanguageModelResponse", - "MessageRole", "ModelInfo", "ModelProvider", "ModelProviderBudget", @@ -46,7 +55,10 @@ __all__ = [ 
"ModelProviderSettings", "ModelProviderUsage", "ModelResponse", + "ModelTokenizer", "OPEN_AI_MODELS", + "OPEN_AI_CHAT_MODELS", + "OPEN_AI_EMBEDDING_MODELS", "OpenAIModelName", "OpenAIProvider", "OpenAISettings", diff --git a/autogpts/autogpt/autogpt/core/resource/model_providers/openai.py b/autogpts/autogpt/autogpt/core/resource/model_providers/openai.py index 461f8e137..1cc2147ca 100644 --- a/autogpts/autogpt/autogpt/core/resource/model_providers/openai.py +++ b/autogpts/autogpt/autogpt/core/resource/model_providers/openai.py @@ -15,23 +15,28 @@ from autogpt.core.configuration import ( UserConfigurable, ) from autogpt.core.resource.model_providers.schema import ( + AssistantChatMessageDict, + ChatMessage, + ChatModelInfo, + ChatModelProvider, + ChatModelResponse, + CompletionModelFunction, Embedding, - EmbeddingModelProvider, EmbeddingModelInfo, + EmbeddingModelProvider, EmbeddingModelResponse, - LanguageModelFunction, - LanguageModelMessage, - LanguageModelProvider, - LanguageModelInfo, - LanguageModelResponse, ModelProviderBudget, ModelProviderCredentials, ModelProviderName, ModelProviderService, ModelProviderSettings, ModelProviderUsage, + ModelTokenizer, ) +_T = TypeVar("_T") +_P = ParamSpec("_P") + OpenAIEmbeddingParser = Callable[[Embedding], Embedding] OpenAIChatParser = Callable[[str], dict] @@ -62,47 +67,51 @@ OPEN_AI_EMBEDDING_MODELS = { name=OpenAIModelName.ADA, service=ModelProviderService.EMBEDDING, provider_name=ModelProviderName.OPENAI, - prompt_token_cost=0.0001/1000, + prompt_token_cost=0.0001 / 1000, max_tokens=8191, embedding_dimensions=1536, ), } -OPEN_AI_LANGUAGE_MODELS = { +OPEN_AI_CHAT_MODELS = { info.name: info for info in [ - LanguageModelInfo( + ChatModelInfo( name=OpenAIModelName.GPT3, - service=ModelProviderService.LANGUAGE, + service=ModelProviderService.CHAT, provider_name=ModelProviderName.OPENAI, - prompt_token_cost=0.0015/1000, - completion_token_cost=0.002/1000, + prompt_token_cost=0.0015 / 1000, + completion_token_cost=0.002 / 1000, max_tokens=4096, + has_function_call_api=True, ), - LanguageModelInfo( + ChatModelInfo( name=OpenAIModelName.GPT3_16k, - service=ModelProviderService.LANGUAGE, + service=ModelProviderService.CHAT, provider_name=ModelProviderName.OPENAI, - prompt_token_cost=0.003/1000, - completion_token_cost=0.004/1000, + prompt_token_cost=0.003 / 1000, + completion_token_cost=0.004 / 1000, max_tokens=16384, + has_function_call_api=True, ), - LanguageModelInfo( + ChatModelInfo( name=OpenAIModelName.GPT4, - service=ModelProviderService.LANGUAGE, + service=ModelProviderService.CHAT, provider_name=ModelProviderName.OPENAI, - prompt_token_cost=0.03/1000, - completion_token_cost=0.06/1000, - max_tokens=8192, + prompt_token_cost=0.03 / 1000, + completion_token_cost=0.06 / 1000, + max_tokens=8191, + has_function_call_api=True, ), - LanguageModelInfo( + ChatModelInfo( name=OpenAIModelName.GPT4_32k, - service=ModelProviderService.LANGUAGE, + service=ModelProviderService.CHAT, provider_name=ModelProviderName.OPENAI, - prompt_token_cost=0.06/1000, - completion_token_cost=0.12/1000, + prompt_token_cost=0.06 / 1000, + completion_token_cost=0.12 / 1000, max_tokens=32768, + has_function_call_api=True, ), ] } @@ -111,17 +120,22 @@ chat_model_mapping = { OpenAIModelName.GPT3: [OpenAIModelName.GPT3_v1, OpenAIModelName.GPT3_v2], OpenAIModelName.GPT3_16k: [OpenAIModelName.GPT3_v2_16k], OpenAIModelName.GPT4: [OpenAIModelName.GPT4_v1, OpenAIModelName.GPT4_v2], - OpenAIModelName.GPT4_32k: [OpenAIModelName.GPT4_v1_32k, OpenAIModelName.GPT4_v2_32k], + 
OpenAIModelName.GPT4_32k: [ + OpenAIModelName.GPT4_v1_32k, + OpenAIModelName.GPT4_v2_32k, + ], } for base, copies in chat_model_mapping.items(): for copy in copies: - copy_info = LanguageModelInfo(**OPEN_AI_LANGUAGE_MODELS[base].__dict__) + copy_info = ChatModelInfo(**OPEN_AI_CHAT_MODELS[base].__dict__) copy_info.name = copy - OPEN_AI_LANGUAGE_MODELS[copy] = copy_info + OPEN_AI_CHAT_MODELS[copy] = copy_info + if copy.endswith(("-0301", "-0314")): + copy_info.has_function_call_api = False OPEN_AI_MODELS = { - **OPEN_AI_LANGUAGE_MODELS, + **OPEN_AI_CHAT_MODELS, **OPEN_AI_EMBEDDING_MODELS, } @@ -141,7 +155,9 @@ class OpenAISettings(ModelProviderSettings): budget: OpenAIModelProviderBudget -class OpenAIProvider(Configurable, LanguageModelProvider, EmbeddingModelProvider): +class OpenAIProvider( + Configurable[OpenAISettings], ChatModelProvider, EmbeddingModelProvider +): default_settings = OpenAISettings( name="openai_provider", description="Provides access to OpenAI's API.", @@ -163,8 +179,6 @@ class OpenAIProvider(Configurable, LanguageModelProvider, EmbeddingModelProvider ), ) - logger = logging.getLogger("model_providers.OpenAIProvider") - def __init__( self, settings: OpenAISettings, @@ -181,7 +195,7 @@ class OpenAIProvider(Configurable, LanguageModelProvider, EmbeddingModelProvider num_retries=self._configuration.retries_per_request, ) - self._create_completion = retry_handler(_create_completion) + self._create_chat_completion = retry_handler(_create_chat_completion) self._create_embedding = retry_handler(_create_embedding) def get_token_limit(self, model_name: str) -> int: @@ -192,16 +206,22 @@ class OpenAIProvider(Configurable, LanguageModelProvider, EmbeddingModelProvider """Get the remaining budget.""" return self._budget.remaining_budget - def count_tokens(self, text: str, model_name: OpenAIModelName) -> int: - encoding = tiktoken.encoding_for_model(model_name) + @classmethod + def get_tokenizer(cls, model_name: OpenAIModelName) -> ModelTokenizer: + return tiktoken.encoding_for_model(model_name) + + @classmethod + def count_tokens(cls, text: str, model_name: OpenAIModelName) -> int: + encoding = cls.get_tokenizer(model_name) return len(encoding.encode(text)) + @classmethod def count_message_tokens( - self, - messages: LanguageModelMessage | list[LanguageModelMessage], + cls, + messages: ChatMessage | list[ChatMessage], model_name: OpenAIModelName, ) -> int: - if isinstance(messages, LanguageModelMessage): + if isinstance(messages, ChatMessage): messages = [messages] if model_name.startswith("gpt-3.5-turbo"): @@ -223,7 +243,7 @@ class OpenAIProvider(Configurable, LanguageModelProvider, EmbeddingModelProvider try: encoding = tiktoken.encoding_for_model(encoding_model) except KeyError: - self.logger.warn( + cls._logger.warn( f"Model {model_name} not found. Defaulting to cl100k_base encoding." 
) encoding = tiktoken.get_encoding("cl100k_base") @@ -238,31 +258,31 @@ class OpenAIProvider(Configurable, LanguageModelProvider, EmbeddingModelProvider num_tokens += 3 # every reply is primed with <|start|>assistant<|message|> return num_tokens - async def create_language_completion( + async def create_chat_completion( self, - model_prompt: list[LanguageModelMessage], + model_prompt: list[ChatMessage], model_name: OpenAIModelName, - completion_parser: Callable[[dict], dict], - functions: list[LanguageModelFunction] = [], + completion_parser: Callable[[AssistantChatMessageDict], _T] = lambda _: None, + functions: list[CompletionModelFunction] = [], **kwargs, - ) -> LanguageModelResponse: + ) -> ChatModelResponse[_T]: """Create a completion using the OpenAI API.""" completion_kwargs = self._get_completion_kwargs(model_name, functions, **kwargs) - response = await self._create_completion( + response = await self._create_chat_completion( messages=model_prompt, **completion_kwargs, ) response_args = { - "model_info": OPEN_AI_LANGUAGE_MODELS[model_name], + "model_info": OPEN_AI_CHAT_MODELS[model_name], "prompt_tokens_used": response.usage.prompt_tokens, "completion_tokens_used": response.usage.completion_tokens, } - parsed_response = completion_parser( - response.choices[0].message.to_dict_recursive() - ) - response = LanguageModelResponse( - content=parsed_response, **response_args + response_message = response.choices[0].message.to_dict_recursive() + response = ChatModelResponse( + response=response_message, + parsed_result=completion_parser(response_message), + **response_args, ) self._budget.update_usage_and_cost(response) return response @@ -293,7 +313,7 @@ class OpenAIProvider(Configurable, LanguageModelProvider, EmbeddingModelProvider def _get_completion_kwargs( self, model_name: OpenAIModelName, - functions: list[LanguageModelFunction], + functions: list[CompletionModelFunction], **kwargs, ) -> dict: """Get kwargs for completion API call. @@ -312,7 +332,7 @@ class OpenAIProvider(Configurable, LanguageModelProvider, EmbeddingModelProvider **self._credentials.unmasked(), } if functions: - completion_kwargs["functions"] = functions + completion_kwargs["functions"] = [f.schema for f in functions] return completion_kwargs @@ -359,8 +379,8 @@ async def _create_embedding(text: str, *_, **kwargs) -> openai.Embedding: ) -async def _create_completion( - messages: list[LanguageModelMessage], *_, **kwargs +async def _create_chat_completion( + messages: list[ChatMessage], *_, **kwargs ) -> openai.Completion: """Create a chat completion using the OpenAI API. @@ -369,21 +389,17 @@ async def _create_completion( Returns: The completion. - """ - messages = [message.dict() for message in messages] - if "functions" in kwargs: - kwargs["functions"] = [function.json_schema for function in kwargs["functions"]] + raw_messages = [ + message.dict(include={"role", "content", "function_call", "name"}) + for message in messages + ] return await openai.ChatCompletion.acreate( - messages=messages, + messages=raw_messages, **kwargs, ) -_T = TypeVar("_T") -_P = ParamSpec("_P") - - class _OpenAIRetryHandler: """Retry Handler for OpenAI API call. 
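Aside: a minimal usage sketch of the reworked `create_chat_completion` API above. The names follow this diff, but the constructor and credential wiring are assumptions for illustration (a real `OpenAISettings` needs a configured API key).

```python
# Illustrative only: assumes OpenAIProvider.__init__ keeps a (settings, logger)
# signature and that default_settings carries valid OpenAI credentials.
import asyncio
import logging

from autogpt.core.resource.model_providers import (
    ChatMessage,
    OpenAIModelName,
    OpenAIProvider,
)


async def demo() -> None:
    provider = OpenAIProvider(
        settings=OpenAIProvider.default_settings,
        logger=logging.getLogger(__name__),
    )
    response = await provider.create_chat_completion(
        model_prompt=[ChatMessage.user("Reply with a single word.")],
        model_name=OpenAIModelName.GPT3,
        # completion_parser receives the raw AssistantChatMessageDict;
        # its return value becomes ChatModelResponse.parsed_result.
        completion_parser=lambda message: (message.get("content") or "").strip(),
    )
    print(response.parsed_result)


asyncio.run(demo())
```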
diff --git a/autogpts/autogpt/autogpt/core/resource/model_providers/schema.py b/autogpts/autogpt/autogpt/core/resource/model_providers/schema.py index 515cde701..816b573ea 100644 --- a/autogpts/autogpt/autogpt/core/resource/model_providers/schema.py +++ b/autogpts/autogpt/autogpt/core/resource/model_providers/schema.py @@ -1,6 +1,15 @@ import abc import enum -from typing import Callable, ClassVar +from typing import ( + Callable, + ClassVar, + Generic, + Literal, + Optional, + Protocol, + TypedDict, + TypeVar, +) from pydantic import BaseModel, Field, SecretStr, validator @@ -13,33 +22,105 @@ from autogpt.core.resource.schema import ( ProviderUsage, ResourceType, ) +from autogpt.core.utils.json_schema import JSONSchema class ModelProviderService(str, enum.Enum): """A ModelService describes what kind of service the model provides.""" EMBEDDING = "embedding" - LANGUAGE = "language" - TEXT = "text" + CHAT = "chat_completion" + TEXT = "text_completion" class ModelProviderName(str, enum.Enum): OPENAI = "openai" -class MessageRole(str, enum.Enum): - USER = "user" - SYSTEM = "system" - ASSISTANT = "assistant" +class ChatMessage(BaseModel): + class Role(str, enum.Enum): + USER = "user" + SYSTEM = "system" + ASSISTANT = "assistant" + + FUNCTION = "function" + """May be used for the return value of function calls""" + + role: Role + content: str + + @staticmethod + def assistant(content: str) -> "ChatMessage": + return ChatMessage(role=ChatMessage.Role.ASSISTANT, content=content) + + @staticmethod + def user(content: str) -> "ChatMessage": + return ChatMessage(role=ChatMessage.Role.USER, content=content) + + @staticmethod + def system(content: str) -> "ChatMessage": + return ChatMessage(role=ChatMessage.Role.SYSTEM, content=content) -class LanguageModelMessage(BaseModel): - role: MessageRole +class ChatMessageDict(TypedDict): + role: str content: str -class LanguageModelFunction(BaseModel): - json_schema: dict +class AssistantFunctionCall(BaseModel): + name: str + arguments: str + + +class AssistantFunctionCallDict(TypedDict): + name: str + arguments: str + + +class AssistantChatMessage(ChatMessage): + role: Literal["assistant"] + content: Optional[str] + function_call: Optional[AssistantFunctionCall] + + +class AssistantChatMessageDict(TypedDict, total=False): + role: str + content: str + function_call: AssistantFunctionCallDict + + +class CompletionModelFunction(BaseModel): + """General representation object for LLM-callable functions.""" + + name: str + description: str + parameters: dict[str, "JSONSchema"] + + @property + def schema(self) -> dict[str, str | dict | list]: + """Returns an OpenAI-consumable function specification""" + + return { + "name": self.name, + "description": self.description, + "parameters": { + "type": "object", + "properties": { + name: param.dump() for name, param in self.parameters.items() + }, + "required": [ + name for name, param in self.parameters.items() if param.required + ], + }, + } + + @staticmethod + def parse(schema: dict) -> "CompletionModelFunction": + return CompletionModelFunction( + name=schema["name"], + description=schema["description"], + parameters=JSONSchema.parse_properties(schema["parameters"]), + ) class ModelInfo(BaseModel): @@ -47,7 +128,6 @@ class ModelInfo(BaseModel): Would be lovely to eventually get this directly from APIs, but needs to be scraped from websites for now. 
- """ name: str @@ -123,12 +203,12 @@ class ModelProviderBudget(ProviderBudget): """Update the usage and cost of the provider.""" model_info = model_response.model_info self.usage.update_usage(model_response) - incremental_cost = ( + incurred_cost = ( model_response.completion_tokens_used * model_info.completion_token_cost + model_response.prompt_tokens_used * model_info.prompt_token_cost ) - self.total_cost += incremental_cost - self.remaining_budget -= incremental_cost + self.total_cost += incurred_cost + self.remaining_budget -= incurred_cost class ModelProviderSettings(ProviderSettings): @@ -140,12 +220,16 @@ class ModelProviderSettings(ProviderSettings): class ModelProvider(abc.ABC): """A ModelProvider abstracts the details of a particular provider of models.""" - defaults: ClassVar[ModelProviderSettings] + default_settings: ClassVar[ModelProviderSettings] @abc.abstractmethod def count_tokens(self, text: str, model_name: str) -> int: ... + @abc.abstractmethod + def get_tokenizer(self, model_name: str) -> "ModelTokenizer": + ... + @abc.abstractmethod def get_token_limit(self, model_name: str) -> int: ... @@ -155,6 +239,18 @@ class ModelProvider(abc.ABC): ... +class ModelTokenizer(Protocol): + """A ModelTokenizer provides tokenization specific to a model.""" + + @abc.abstractmethod + def encode(self, text: str) -> list: + ... + + @abc.abstractmethod + def decode(self, tokens: list) -> str: + ... + + #################### # Embedding Models # #################### @@ -193,40 +289,45 @@ class EmbeddingModelProvider(ModelProvider): ... -################### -# Language Models # -################### +############### +# Chat Models # +############### -class LanguageModelInfo(ModelInfo): +class ChatModelInfo(ModelInfo): """Struct for language model information.""" - llm_service = ModelProviderService.LANGUAGE + llm_service = ModelProviderService.CHAT max_tokens: int + has_function_call_api: bool = False -class LanguageModelResponse(ModelResponse): +_T = TypeVar("_T") + + +class ChatModelResponse(ModelResponse, Generic[_T]): """Standard response struct for a response from a language model.""" - content: dict = None + response: AssistantChatMessageDict + parsed_result: _T = None -class LanguageModelProvider(ModelProvider): +class ChatModelProvider(ModelProvider): @abc.abstractmethod def count_message_tokens( self, - messages: LanguageModelMessage | list[LanguageModelMessage], + messages: ChatMessage | list[ChatMessage], model_name: str, ) -> int: ... @abc.abstractmethod - async def create_language_completion( + async def create_chat_completion( self, - model_prompt: list[LanguageModelMessage], - functions: list[LanguageModelFunction], + model_prompt: list[ChatMessage], model_name: str, - completion_parser: Callable[[dict], dict], + completion_parser: Callable[[AssistantChatMessageDict], _T] = lambda _: None, + functions: list[CompletionModelFunction] = [], **kwargs, - ) -> LanguageModelResponse: + ) -> ChatModelResponse[_T]: ... 
diff --git a/autogpts/autogpt/autogpt/core/runner/cli_app/cli.py b/autogpts/autogpt/autogpt/core/runner/cli_app/cli.py index 56fca9759..7343df0f3 100644 --- a/autogpts/autogpt/autogpt/core/runner/cli_app/cli.py +++ b/autogpts/autogpt/autogpt/core/runner/cli_app/cli.py @@ -35,7 +35,7 @@ autogpt.add_command(make_settings) async def run(settings_file: str, pdb: bool) -> None: """Run the Auto-GPT agent.""" click.echo("Running Auto-GPT agent...") - settings_file = Path(settings_file) + settings_file: Path = Path(settings_file) settings = {} if settings_file.exists(): settings = yaml.safe_load(settings_file.read_text()) diff --git a/autogpts/autogpt/autogpt/core/runner/cli_app/main.py b/autogpts/autogpt/autogpt/core/runner/cli_app/main.py index 3c01ca9ba..1d58d170c 100644 --- a/autogpts/autogpt/autogpt/core/runner/cli_app/main.py +++ b/autogpts/autogpt/autogpt/core/runner/cli_app/main.py @@ -2,7 +2,8 @@ import click from autogpt.core.agent import AgentSettings, SimpleAgent from autogpt.core.runner.client_lib.logging import ( - configure_root_logger, get_client_logger + configure_root_logger, + get_client_logger, ) from autogpt.core.runner.client_lib.parser import ( parse_ability_result, diff --git a/autogpts/autogpt/autogpt/core/runner/client_lib/logging/__init__.py b/autogpts/autogpt/autogpt/core/runner/client_lib/logging/__init__.py index 5e01ebf8a..6d263b6ad 100644 --- a/autogpts/autogpt/autogpt/core/runner/client_lib/logging/__init__.py +++ b/autogpts/autogpt/autogpt/core/runner/client_lib/logging/__init__.py @@ -1,6 +1,6 @@ import logging -from .config import configure_root_logger, FancyConsoleFormatter, BelowLevelFilter +from .config import BelowLevelFilter, FancyConsoleFormatter, configure_root_logger from .helpers import dump_prompt diff --git a/autogpts/autogpt/autogpt/core/runner/client_lib/logging/helpers.py b/autogpts/autogpt/autogpt/core/runner/client_lib/logging/helpers.py index 5d4ea6098..d341f16ca 100644 --- a/autogpts/autogpt/autogpt/core/runner/client_lib/logging/helpers.py +++ b/autogpts/autogpt/autogpt/core/runner/client_lib/logging/helpers.py @@ -2,12 +2,12 @@ from math import ceil, floor from typing import TYPE_CHECKING if TYPE_CHECKING: - from autogpt.core.planning import LanguageModelPrompt + from autogpt.core.prompting import ChatPrompt SEPARATOR_LENGTH = 42 -def dump_prompt(prompt: "LanguageModelPrompt") -> str: +def dump_prompt(prompt: "ChatPrompt") -> str: def separator(text: str): half_sep_len = (SEPARATOR_LENGTH - 2 - len(text)) / 2 return f"{floor(half_sep_len)*'-'} {text.upper()} {ceil(half_sep_len)*'-'}" diff --git a/autogpts/autogpt/autogpt/core/utils/json_schema.py b/autogpts/autogpt/autogpt/core/utils/json_schema.py new file mode 100644 index 000000000..702a6d7dc --- /dev/null +++ b/autogpts/autogpt/autogpt/core/utils/json_schema.py @@ -0,0 +1,116 @@ +import enum +import json +from logging import Logger +from typing import Literal, Optional + +from jsonschema import Draft7Validator +from pydantic import BaseModel + + +class JSONSchema(BaseModel): + class Type(str, enum.Enum): + STRING = "string" + ARRAY = "array" + OBJECT = "object" + NUMBER = "number" + INTEGER = "integer" + BOOLEAN = "boolean" + + # TODO: add docstrings + description: Optional[str] = None + type: Optional[Type] = None + enum: Optional[list] = None + required: bool = False + items: Optional["JSONSchema"] = None + properties: Optional[dict[str, "JSONSchema"]] = None + minimum: Optional[int | float] = None + maximum: Optional[int | float] = None + minItems: Optional[int] = None + maxItems: 
Optional[int] = None + + def dump(self) -> dict: + schema: dict = { + "type": self.type.value if self.type else None, + "description": self.description, + } + if self.type == "array": + if self.items: + schema["items"] = self.items.dump() + schema["minItems"] = self.minItems + schema["maxItems"] = self.maxItems + elif self.type == "object": + if self.properties: + schema["properties"] = { + name: prop.dump() for name, prop in self.properties.items() + } + schema["required"] = [ + name for name, prop in self.properties.items() if prop.required + ] + elif self.enum: + schema["enum"] = self.enum + else: + schema["minimum"] = self.minimum + schema["maximum"] = self.maximum + + schema = {k: v for k, v in schema.items() if v is not None} + + return schema + + @staticmethod + def from_dict(schema: dict) -> "JSONSchema": + return JSONSchema( + description=schema.get("description"), + type=schema["type"], + enum=schema["enum"] if "enum" in schema else None, + items=JSONSchema.from_dict(schema["items"]) if "items" in schema else None, + properties=JSONSchema.parse_properties(schema) + if schema["type"] == "object" + else None, + minimum=schema.get("minimum"), + maximum=schema.get("maximum"), + minItems=schema.get("minItems"), + maxItems=schema.get("maxItems"), + ) + + @staticmethod + def parse_properties(schema_node: dict) -> dict[str, "JSONSchema"]: + properties = ( + {k: JSONSchema.from_dict(v) for k, v in schema_node["properties"].items()} + if "properties" in schema_node + else {} + ) + if "required" in schema_node: + for k, v in properties.items(): + v.required = k in schema_node["required"] + return properties + + def validate_object( + self, object: object, logger: Logger + ) -> tuple[Literal[True], None] | tuple[Literal[False], list]: + """ + Validates a dictionary object against the JSONSchema. + + Args: + object: The dictionary object to validate. + + Returns: + tuple: A tuple whose first element indicates whether the object is valid, + and whose second element is a list of validation errors, or None if the object is valid. + """ + validator = Draft7Validator(self.dump()) + + if errors := sorted(validator.iter_errors(object), key=lambda e: e.path): + for error in errors: + logger.debug(f"JSON Validation Error: {error}") + + logger.error(json.dumps(object, indent=4)) + logger.error("The following issues were found:") + + for error in errors: + logger.error(f"Error: {error.message}") + return False, errors + + logger.debug("The JSON object is valid.") + + return True, None
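Aside: a short sketch of exercising the new `JSONSchema.validate_object` helper; the schema and payloads are invented for illustration:

```python
import logging

from autogpt.core.utils.json_schema import JSONSchema

# Hypothetical schema: an object with a required string and a bounded integer.
schema = JSONSchema(
    type=JSONSchema.Type.OBJECT,
    properties={
        "name": JSONSchema(type=JSONSchema.Type.STRING, required=True),
        "age": JSONSchema(type=JSONSchema.Type.INTEGER, minimum=0),
    },
)

logger = logging.getLogger(__name__)

# A conforming payload validates cleanly.
valid, errors = schema.validate_object({"name": "Ada", "age": 36}, logger)
assert valid and errors is None

# A payload missing the required "name" key (and with age < minimum) fails,
# and each jsonschema error is logged before being returned.
valid, errors = schema.validate_object({"age": -1}, logger)
assert not valid
```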