AutoGPT/core: improve `model_providers` typing and tooling

* Make .schema model names less pedantic

* Rename LanguageModel* objects to ChatModel* or CompletionModel* where appropriate

* Add `JSONSchema` utility class in `core.utils`

* Use `JSONSchema` instead of untyped dicts for `Ability` and `CompletionModelFunction` parameter specification

* Add token counting methods to `ModelProvider` interface and implementations
pull/5286/head
Reinier van der Leer 2023-09-21 14:48:29 +02:00
parent 618e7606ef
commit 88f0ccfd7e
28 changed files with 849 additions and 504 deletions
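
The central change across these diffs is the replacement of untyped JSON Schema dicts with a typed `JSONSchema` class and a `CompletionModelFunction` model. As a minimal sketch of the new style (using only names that appear in the diffs below; the `read_file` spec here is an illustrative composition, not a literal excerpt):

from autogpt.core.resource.model_providers import CompletionModelFunction
from autogpt.core.utils.json_schema import JSONSchema

# Parameter specs are now typed objects keyed by parameter name,
# instead of a raw {"type": "object", "properties": ...} dict.
read_file_spec = CompletionModelFunction(
    name="read_file",
    description="Read and parse all text from a file.",
    parameters={
        "filename": JSONSchema(
            type=JSONSchema.Type.STRING,
            description="The name of the file to read.",
        ),
    },
)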

View File

@@ -1,4 +1,8 @@
 """The command system provides a way to extend the functionality of the AI agent."""
-from autogpt.core.ability.base import Ability, AbilityRegistry
+from autogpt.core.ability.base import Ability, AbilityConfiguration, AbilityRegistry
 from autogpt.core.ability.schema import AbilityResult
-from autogpt.core.ability.simple import AbilityRegistrySettings, SimpleAbilityRegistry
+from autogpt.core.ability.simple import (
+    AbilityRegistryConfiguration,
+    AbilityRegistrySettings,
+    SimpleAbilityRegistry,
+)

View File

@@ -5,11 +5,13 @@ from typing import Any, ClassVar

 import inflection
 from pydantic import Field

-from autogpt.core.ability.schema import AbilityResult
 from autogpt.core.configuration import SystemConfiguration
 from autogpt.core.planning.simple import LanguageModelConfiguration
 from autogpt.core.plugin.base import PluginLocation
+from autogpt.core.resource.model_providers import CompletionModelFunction
+from autogpt.core.utils.json_schema import JSONSchema
+
+from .schema import AbilityResult


 class AbilityConfiguration(SystemConfiguration):
@@ -32,40 +34,34 @@ class Ability(abc.ABC):
         """The name of the ability."""
         return inflection.underscore(cls.__name__)

-    @property
     @classmethod
     @abc.abstractmethod
     def description(cls) -> str:
         """A detailed description of what the ability does."""
         ...

-    @property
     @classmethod
     @abc.abstractmethod
-    def arguments(cls) -> dict:
-        """A dict of arguments in standard json schema format."""
+    def parameters(cls) -> dict[str, JSONSchema]:
         ...

-    @classmethod
-    def required_arguments(cls) -> list[str]:
-        """A list of required arguments."""
-        return []
-
     @abc.abstractmethod
     async def __call__(self, *args: Any, **kwargs: Any) -> AbilityResult:
         ...

     def __str__(self) -> str:
-        return pformat(self.dump())
+        return pformat(self.spec)

-    def dump(self) -> dict:
-        return {
-            "name": self.name(),
-            "description": self.description(),
-            "parameters": {
-                "type": "object",
-                "properties": self.arguments(),
-                "required": self.required_arguments(),
-            },
-        }
+    @property
+    @classmethod
+    def spec(cls) -> CompletionModelFunction:
+        return CompletionModelFunction(
+            name=cls.name(),
+            description=cls.description,
+            parameters=cls.parameters,
+        )


 class AbilityRegistry(abc.ABC):
@@ -80,7 +76,7 @@ class AbilityRegistry(abc.ABC):
         ...

     @abc.abstractmethod
-    def dump_abilities(self) -> list[dict]:
+    def dump_abilities(self) -> list[CompletionModelFunction]:
         ...

     @abc.abstractmethod
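
`core.utils.json_schema.JSONSchema` itself is not part of this excerpt. Judging from its call sites in these diffs (the constructor keywords, `dump()`, and `parse_properties()`), its shape is roughly the following sketch; anything beyond those call sites is an assumption:

import enum
from typing import Optional

from pydantic import BaseModel


class JSONSchema(BaseModel):  # sketch inferred from usage, not the real class
    class Type(str, enum.Enum):
        STRING = "string"
        INTEGER = "integer"
        NUMBER = "number"
        BOOLEAN = "boolean"
        ARRAY = "array"
        OBJECT = "object"

    description: Optional[str] = None
    type: Optional[Type] = None
    enum: Optional[list] = None
    required: bool = False
    items: Optional["JSONSchema"] = None
    properties: Optional[dict[str, "JSONSchema"]] = None
    minimum: Optional[int] = None
    maximum: Optional[int] = None
    minItems: Optional[int] = None
    maxItems: Optional[int] = None

    def dump(self) -> dict:
        # Serialize to a plain JSON Schema dict (used by NextAbility below).
        return self.dict(exclude_none=True)

    @staticmethod
    def parse_properties(schema_nodes: dict) -> dict[str, "JSONSchema"]:
        # Inverse of dump() for a properties map (used by NextAbility below).
        return {k: JSONSchema.parse_obj(v) for k, v in schema_nodes.items()}


JSONSchema.update_forward_refs()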

View File

@@ -1,8 +1,10 @@
 import logging
+from typing import ClassVar

 from autogpt.core.ability.base import Ability, AbilityConfiguration
 from autogpt.core.ability.schema import AbilityResult
 from autogpt.core.plugin.simple import PluginLocation, PluginStorageFormat
+from autogpt.core.utils.json_schema import JSONSchema


 class CreateNewAbility(Ability):
@@ -21,75 +23,63 @@ class CreateNewAbility(Ability):
         self._logger = logger
         self._configuration = configuration

-    @classmethod
-    def description(cls) -> str:
-        return "Create a new ability by writing python code."
+    description: ClassVar[str] = "Create a new ability by writing python code."

-    @classmethod
-    def arguments(cls) -> dict:
-        return {
-            "ability_name": {
-                "type": "string",
-                "description": "A meaningful and concise name for the new ability.",
-            },
-            "description": {
-                "type": "string",
-                "description": "A detailed description of the ability and its uses, including any limitations.",
-            },
-            "arguments": {
-                "type": "array",
-                "items": {
-                    "type": "object",
-                    "properties": {
-                        "name": {
-                            "type": "string",
-                            "description": "The name of the argument.",
-                        },
-                        "type": {
-                            "type": "string",
-                            "description": "The type of the argument. Must be a standard json schema type.",
-                        },
-                        "description": {
-                            "type": "string",
-                            "description": "A detailed description of the argument and its uses.",
-                        },
-                    },
-                },
-                "description": "A list of arguments that the ability will accept.",
-            },
-            "required_arguments": {
-                "type": "array",
-                "items": {
-                    "type": "string",
-                    "description": "The names of the arguments that are required.",
-                },
-                "description": "A list of the names of the arguments that are required.",
-            },
-            "package_requirements": {
-                "type": "array",
-                "items": {
-                    "type": "string",
-                    "description": "The name of the Python package that is required to execute the ability.",
-                },
-                "description": "A list of the names of the Python packages that are required to execute the ability.",
-            },
-            "code": {
-                "type": "string",
-                "description": "The Python code that will be executed when the ability is called.",
-            },
-        }
-
-    @classmethod
-    def required_arguments(cls) -> list[str]:
-        return [
-            "ability_name",
-            "description",
-            "arguments",
-            "required_arguments",
-            "package_requirements",
-            "code",
-        ]
+    parameters: ClassVar[dict[str, JSONSchema]] = {
+        "ability_name": JSONSchema(
+            description="A meaningful and concise name for the new ability.",
+            type=JSONSchema.Type.STRING,
+            required=True,
+        ),
+        "description": JSONSchema(
+            description="A detailed description of the ability and its uses, including any limitations.",
+            type=JSONSchema.Type.STRING,
+            required=True,
+        ),
+        "arguments": JSONSchema(
+            description="A list of arguments that the ability will accept.",
+            type=JSONSchema.Type.ARRAY,
+            items=JSONSchema(
+                type=JSONSchema.Type.OBJECT,
+                properties={
+                    "name": JSONSchema(
+                        description="The name of the argument.",
+                        type=JSONSchema.Type.STRING,
+                    ),
+                    "type": JSONSchema(
+                        description="The type of the argument. Must be a standard json schema type.",
+                        type=JSONSchema.Type.STRING,
+                    ),
+                    "description": JSONSchema(
+                        description="A detailed description of the argument and its uses.",
+                        type=JSONSchema.Type.STRING,
+                    ),
+                },
+            ),
+        ),
+        "required_arguments": JSONSchema(
+            description="A list of the names of the arguments that are required.",
+            type=JSONSchema.Type.ARRAY,
+            items=JSONSchema(
+                description="The names of the arguments that are required.",
+                type=JSONSchema.Type.STRING,
+            ),
+        ),
+        "package_requirements": JSONSchema(
+            description="A list of the names of the Python packages that are required to execute the ability.",
+            type=JSONSchema.Type.ARRAY,
+            items=JSONSchema(
+                description="The name of the Python package that is required to execute the ability.",
+                type=JSONSchema.Type.STRING,
+            ),
+        ),
+        "code": JSONSchema(
+            description="The Python code that will be executed when the ability is called.",
+            type=JSONSchema.Type.STRING,
+            required=True,
+        ),
+    }

     async def __call__(
         self,
         ability_name: str,

View File

@@ -1,13 +1,20 @@
 import logging
 import os
+from typing import ClassVar

 from autogpt.core.ability.base import Ability, AbilityConfiguration
 from autogpt.core.ability.schema import AbilityResult, ContentType, Knowledge
+from autogpt.core.plugin.simple import PluginLocation, PluginStorageFormat
+from autogpt.core.utils.json_schema import JSONSchema
 from autogpt.core.workspace import Workspace


 class ReadFile(Ability):
     default_configuration = AbilityConfiguration(
+        location=PluginLocation(
+            storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
+            storage_route="autogpt.core.ability.builtins.ReadFile",
+        ),
         packages_required=["unstructured"],
         workspace_required=True,
     )
@@ -20,17 +27,13 @@ class ReadFile(Ability):
         self._logger = logger
         self._workspace = workspace

-    @property
-    def description(self) -> str:
-        return "Read and parse all text from a file."
+    description: ClassVar[str] = "Read and parse all text from a file."

-    @property
-    def arguments(self) -> dict:
-        return {
-            "filename": {
-                "type": "string",
-                "description": "The name of the file to read.",
-            },
-        }
+    parameters: ClassVar[dict[str, JSONSchema]] = {
+        "filename": JSONSchema(
+            type=JSONSchema.Type.STRING,
+            description="The name of the file to read.",
+        ),
+    }

     def _check_preconditions(self, filename: str) -> AbilityResult | None:
@@ -92,6 +95,10 @@ class ReadFile(Ability):

 class WriteFile(Ability):
     default_configuration = AbilityConfiguration(
+        location=PluginLocation(
+            storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
+            storage_route="autogpt.core.ability.builtins.WriteFile",
+        ),
         packages_required=["unstructured"],
         workspace_required=True,
     )
@@ -104,21 +111,17 @@ class WriteFile(Ability):
         self._logger = logger
         self._workspace = workspace

-    @property
-    def description(self) -> str:
-        return "Write text to a file."
+    description: ClassVar[str] = "Write text to a file."

-    @property
-    def arguments(self) -> dict:
-        return {
-            "filename": {
-                "type": "string",
-                "description": "The name of the file to write.",
-            },
-            "contents": {
-                "type": "string",
-                "description": "The contents of the file to write.",
-            },
-        }
+    parameters: ClassVar[dict[str, JSONSchema]] = {
+        "filename": JSONSchema(
+            type=JSONSchema.Type.STRING,
+            description="The name of the file to write.",
+        ),
+        "contents": JSONSchema(
+            type=JSONSchema.Type.STRING,
+            description="The contents of the file to write.",
+        ),
+    }

     def _check_preconditions(

View File

@@ -1,16 +1,17 @@
 import logging
+from typing import ClassVar

 from autogpt.core.ability.base import Ability, AbilityConfiguration
 from autogpt.core.ability.schema import AbilityResult
 from autogpt.core.planning.simple import LanguageModelConfiguration
 from autogpt.core.plugin.simple import PluginLocation, PluginStorageFormat
 from autogpt.core.resource.model_providers import (
-    LanguageModelMessage,
-    LanguageModelProvider,
-    MessageRole,
+    ChatMessage,
+    ChatModelProvider,
     ModelProviderName,
     OpenAIModelName,
 )
+from autogpt.core.utils.json_schema import JSONSchema


 class QueryLanguageModel(Ability):
@@ -30,49 +31,33 @@ class QueryLanguageModel(Ability):
         self,
         logger: logging.Logger,
         configuration: AbilityConfiguration,
-        language_model_provider: LanguageModelProvider,
+        language_model_provider: ChatModelProvider,
     ):
         self._logger = logger
         self._configuration = configuration
         self._language_model_provider = language_model_provider

-    @classmethod
-    def description(cls) -> str:
-        return "Query a language model. A query should be a question and any relevant context."
+    description: ClassVar[str] = (
+        "Query a language model."
+        " A query should be a question and any relevant context."
+    )

-    @classmethod
-    def arguments(cls) -> dict:
-        return {
-            "query": {
-                "type": "string",
-                "description": "A query for a language model. A query should contain a question and any relevant context.",
-            },
-        }
-
-    @classmethod
-    def required_arguments(cls) -> list[str]:
-        return ["query"]
+    parameters: ClassVar[dict[str, JSONSchema]] = {
+        "query": JSONSchema(
+            type=JSONSchema.Type.STRING,
+            description="A query for a language model. A query should contain a question and any relevant context.",
+        )
+    }

     async def __call__(self, query: str) -> AbilityResult:
-        messages = [
-            LanguageModelMessage(
-                content=query,
-                role=MessageRole.USER,
-            ),
-        ]
-        model_response = await self._language_model_provider.create_language_completion(
-            model_prompt=messages,
+        model_response = await self._language_model_provider.create_chat_completion(
+            model_prompt=[ChatMessage.user(query)],
             functions=[],
             model_name=self._configuration.language_model_required.model_name,
-            completion_parser=self._parse_response,
         )
         return AbilityResult(
             ability_name=self.name(),
             ability_args={"query": query},
             success=True,
-            message=model_response.content["content"],
+            message=model_response.response["content"],
         )
-
-    @staticmethod
-    def _parse_response(response_content: dict) -> dict:
-        return {"content": response_content["content"]}

View File

@@ -7,7 +7,8 @@ from autogpt.core.configuration import Configurable, SystemConfiguration, System
 from autogpt.core.memory.base import Memory
 from autogpt.core.plugin.simple import SimplePluginService
 from autogpt.core.resource.model_providers import (
-    LanguageModelProvider,
+    ChatModelProvider,
+    CompletionModelFunction,
     ModelProviderName,
 )
 from autogpt.core.workspace.base import Workspace
@@ -41,7 +42,7 @@ class SimpleAbilityRegistry(AbilityRegistry, Configurable):
         logger: logging.Logger,
         memory: Memory,
         workspace: Workspace,
-        model_providers: dict[ModelProviderName, LanguageModelProvider],
+        model_providers: dict[ModelProviderName, ChatModelProvider],
     ):
         self._configuration = settings.configuration
         self._logger = logger
@@ -78,12 +79,10 @@ class SimpleAbilityRegistry(AbilityRegistry, Configurable):
         self._abilities.append(ability)

     def list_abilities(self) -> list[str]:
-        return [
-            f"{ability.name()}: {ability.description()}" for ability in self._abilities
-        ]
+        return [f"{ability.name()}: {ability.description}" for ability in self._abilities]

-    def dump_abilities(self) -> list[dict]:
-        return [ability.dump() for ability in self._abilities]
+    def dump_abilities(self) -> list[CompletionModelFunction]:
+        return [ability.spec for ability in self._abilities]

     def get_ability(self, ability_name: str) -> Ability:
         for ability in self._abilities:

View File

@@ -19,7 +19,11 @@ from autogpt.core.plugin.simple import (
     PluginStorageFormat,
     SimplePluginService,
 )
-from autogpt.core.resource.model_providers import OpenAIProvider, OpenAISettings
+from autogpt.core.resource.model_providers import (
+    CompletionModelFunction,
+    OpenAIProvider,
+    OpenAISettings,
+)
 from autogpt.core.workspace.simple import SimpleWorkspace, WorkspaceSettings
@@ -178,7 +182,7 @@ class SimpleAgent(Agent, Configurable):
             agent_goals=self._configuration.goals,
             abilities=self._ability_registry.list_abilities(),
         )
-        tasks = [Task.parse_obj(task) for task in plan.content["task_list"]]
+        tasks = [Task.parse_obj(task) for task in plan.parsed_result["task_list"]]

         # TODO: Should probably do a step to evaluate the quality of the generated tasks,
         #  and ensure that they have actionable ready and acceptance criteria
@@ -186,7 +190,7 @@ class SimpleAgent(Agent, Configurable):
         self._task_queue.extend(tasks)
         self._task_queue.sort(key=lambda t: t.priority, reverse=True)
         self._task_queue[-1].context.status = TaskStatus.READY
-        return plan.content
+        return plan.parsed_result

     async def determine_next_ability(self, *args, **kwargs):
         if not self._task_queue:
@@ -202,7 +206,7 @@ class SimpleAgent(Agent, Configurable):
             self._ability_registry.dump_abilities(),
         )
         self._current_task = task
-        self._next_ability = next_ability.content
+        self._next_ability = next_ability.parsed_result
         return self._current_task, self._next_ability

     async def execute_next_ability(self, user_input: str, *args, **kwargs):
@@ -236,7 +240,11 @@ class SimpleAgent(Agent, Configurable):
         task.context.status = TaskStatus.IN_PROGRESS
         return task

-    async def _choose_next_ability(self, task: Task, ability_schema: list[dict]):
+    async def _choose_next_ability(
+        self,
+        task: Task,
+        ability_specs: list[CompletionModelFunction],
+    ):
         """Choose the next ability to use for the task."""
         self._logger.debug(f"Choosing next ability for task {task}.")
         if task.context.cycle_count > self._configuration.max_task_cycle_count:
@@ -247,7 +255,7 @@ class SimpleAgent(Agent, Configurable):
             raise NotImplementedError
         else:
             next_ability = await self._planning.determine_next_ability(
-                task, ability_schema
+                task, ability_specs
             )
             return next_ability
@@ -328,7 +336,7 @@ class SimpleAgent(Agent, Configurable):
             user_objective,
         )
-        return model_response.content
+        return model_response.parsed_result

     @classmethod
     def provision_agent(

View File

@@ -1,10 +1,12 @@
 import abc
+import functools
 import typing
 from typing import Any, Generic, TypeVar

 from pydantic import BaseModel, Field


+@functools.wraps(Field)
 def UserConfigurable(*args, **kwargs):
     return Field(*args, **kwargs, user_configurable=True)
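
Wrapping `UserConfigurable` in `functools.wraps(Field)` makes the helper advertise `Field`'s signature and docstring to IDEs and type tooling without changing its behavior. A hypothetical usage example (the class and field names are illustrative, not from the diff):

from autogpt.core.configuration import SystemConfiguration, UserConfigurable


class ExampleConfiguration(SystemConfiguration):  # hypothetical
    # Marked user-configurable, so it can be overridden via user settings.
    retries_per_request: int = UserConfigurable(default=10)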

View File

@@ -1,7 +1,3 @@
 """The planning system organizes the Agent's activities."""
-from autogpt.core.planning.schema import (
-    Task,
-    TaskStatus,
-    TaskType,
-)
+from autogpt.core.planning.schema import Task, TaskStatus, TaskType
 from autogpt.core.planning.simple import PlannerSettings, SimplePlanner

View File

@@ -1,12 +1,3 @@
-from .initial_plan import (
-    InitialPlan,
-    InitialPlanConfiguration,
-)
-from .name_and_goals import (
-    NameAndGoals,
-    NameAndGoalsConfiguration,
-)
-from .next_ability import (
-    NextAbility,
-    NextAbilityConfiguration,
-)
+from .initial_plan import InitialPlan, InitialPlanConfiguration
+from .name_and_goals import NameAndGoals, NameAndGoalsConfiguration
+from .next_ability import NextAbility, NextAbilityConfiguration

View File

@@ -3,16 +3,14 @@ import logging
 from autogpt.core.configuration import SystemConfiguration, UserConfigurable
 from autogpt.core.planning.schema import Task, TaskType
 from autogpt.core.prompting import PromptStrategy
-from autogpt.core.prompting.schema import (
-    LanguageModelClassification,
-    LanguageModelPrompt,
-)
+from autogpt.core.prompting.schema import ChatPrompt, LanguageModelClassification
 from autogpt.core.prompting.utils import json_loads, to_numbered_list
 from autogpt.core.resource.model_providers import (
-    LanguageModelFunction,
-    LanguageModelMessage,
-    MessageRole,
+    AssistantChatMessageDict,
+    ChatMessage,
+    CompletionModelFunction,
 )
+from autogpt.core.utils.json_schema import JSONSchema

 logger = logging.getLogger(__name__)
@@ -47,66 +45,56 @@ class InitialPlan(PromptStrategy):
         "You are {agent_name}, {agent_role}\n" "Your goals are:\n" "{agent_goals}"
     )

-    DEFAULT_CREATE_PLAN_FUNCTION = {
-        "name": "create_initial_agent_plan",
-        "description": "Creates a set of tasks that forms the initial plan for an autonomous agent.",
-        "parameters": {
-            "type": "object",
-            "properties": {
-                "task_list": {
-                    "type": "array",
-                    "items": {
-                        "type": "object",
-                        "properties": {
-                            "objective": {
-                                "type": "string",
-                                "description": "An imperative verb phrase that succinctly describes the task.",
-                            },
-                            "type": {
-                                "type": "string",
-                                "description": "A categorization for the task.",
-                                "enum": [t.value for t in TaskType],
-                            },
-                            "acceptance_criteria": {
-                                "type": "array",
-                                "items": {
-                                    "type": "string",
-                                    "description": "A list of measurable and testable criteria that must be met for the task to be considered complete.",
-                                },
-                            },
-                            "priority": {
-                                "type": "integer",
-                                "description": "A number between 1 and 10 indicating the priority of the task relative to other generated tasks.",
-                                "minimum": 1,
-                                "maximum": 10,
-                            },
-                            "ready_criteria": {
-                                "type": "array",
-                                "items": {
-                                    "type": "string",
-                                    "description": "A list of measurable and testable criteria that must be met before the task can be started.",
-                                },
-                            },
-                        },
-                        "required": [
-                            "objective",
-                            "type",
-                            "acceptance_criteria",
-                            "priority",
-                            "ready_criteria",
-                        ],
-                    },
-                },
-            },
-        },
-    }
+    DEFAULT_CREATE_PLAN_FUNCTION = CompletionModelFunction(
+        name="create_initial_agent_plan",
+        description="Creates a set of tasks that forms the initial plan for an autonomous agent.",
+        parameters={
+            "task_list": JSONSchema(
+                type=JSONSchema.Type.ARRAY,
+                items=JSONSchema(
+                    type=JSONSchema.Type.OBJECT,
+                    properties={
+                        "objective": JSONSchema(
+                            type=JSONSchema.Type.STRING,
+                            description="An imperative verb phrase that succinctly describes the task.",
+                        ),
+                        "type": JSONSchema(
+                            type=JSONSchema.Type.STRING,
+                            description="A categorization for the task.",
+                            enum=[t.value for t in TaskType],
+                        ),
+                        "acceptance_criteria": JSONSchema(
+                            type=JSONSchema.Type.ARRAY,
+                            items=JSONSchema(
+                                type=JSONSchema.Type.STRING,
+                                description="A list of measurable and testable criteria that must be met for the task to be considered complete.",
+                            ),
+                        ),
+                        "priority": JSONSchema(
+                            type=JSONSchema.Type.INTEGER,
+                            description="A number between 1 and 10 indicating the priority of the task relative to other generated tasks.",
+                            minimum=1,
+                            maximum=10,
+                        ),
+                        "ready_criteria": JSONSchema(
+                            type=JSONSchema.Type.ARRAY,
+                            items=JSONSchema(
+                                type=JSONSchema.Type.STRING,
+                                description="A list of measurable and testable criteria that must be met before the task can be started.",
+                            ),
+                        ),
+                    },
+                ),
+            ),
+        },
+    )

     default_configuration: InitialPlanConfiguration = InitialPlanConfiguration(
         model_classification=LanguageModelClassification.SMART_MODEL,
         system_prompt_template=DEFAULT_SYSTEM_PROMPT_TEMPLATE,
         system_info=DEFAULT_SYSTEM_INFO,
         user_prompt_template=DEFAULT_USER_PROMPT_TEMPLATE,
-        create_plan_function=DEFAULT_CREATE_PLAN_FUNCTION,
+        create_plan_function=DEFAULT_CREATE_PLAN_FUNCTION.schema,
     )

     def __init__(
@@ -121,7 +109,7 @@ class InitialPlan(PromptStrategy):
         self._system_prompt_template = system_prompt_template
         self._system_info = system_info
         self._user_prompt_template = user_prompt_template
-        self._create_plan_function = create_plan_function
+        self._create_plan_function = CompletionModelFunction.parse(create_plan_function)

     @property
     def model_classification(self) -> LanguageModelClassification:
@@ -137,7 +125,7 @@ class InitialPlan(PromptStrategy):
         api_budget: float,
         current_time: str,
         **kwargs,
-    ) -> LanguageModelPrompt:
+    ) -> ChatPrompt:
         template_kwargs = {
             "agent_name": agent_name,
             "agent_role": agent_role,
@@ -154,28 +142,23 @@ class InitialPlan(PromptStrategy):
             self._system_info, **template_kwargs
         )

-        system_prompt = LanguageModelMessage(
-            role=MessageRole.SYSTEM,
-            content=self._system_prompt_template.format(**template_kwargs),
+        system_prompt = ChatMessage.system(
+            self._system_prompt_template.format(**template_kwargs),
         )
-        user_prompt = LanguageModelMessage(
-            role=MessageRole.USER,
-            content=self._user_prompt_template.format(**template_kwargs),
-        )
-        create_plan_function = LanguageModelFunction(
-            json_schema=self._create_plan_function,
+        user_prompt = ChatMessage.user(
+            self._user_prompt_template.format(**template_kwargs),
         )

-        return LanguageModelPrompt(
+        return ChatPrompt(
             messages=[system_prompt, user_prompt],
-            functions=[create_plan_function],
+            functions=[self._create_plan_function],
             # TODO:
             tokens_used=0,
         )

     def parse_response_content(
         self,
-        response_content: dict,
+        response_content: AssistantChatMessageDict,
     ) -> dict:
         """Parse the actual text response from the objective model.
@@ -184,7 +167,6 @@ class InitialPlan(PromptStrategy):

         Returns:
             The parsed response.
         """
-
         try:
             parsed_response = json_loads(response_content["function_call"]["arguments"])
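
Note the round-trip introduced here: the default function spec is built as a typed `CompletionModelFunction`, stored in the dict-based configuration via its `.schema` property, and re-hydrated in `__init__` with `CompletionModelFunction.parse(...)`. The class is defined outside this excerpt; a self-consistent sketch of that serialize/parse pair, with all details assumed from these call sites:

from dataclasses import dataclass

from autogpt.core.utils.json_schema import JSONSchema


@dataclass
class CompletionModelFunction:  # sketch, not the real definition
    name: str
    description: str
    parameters: dict[str, JSONSchema]

    @property
    def schema(self) -> dict:
        # Plain-dict form, so the spec can live in user-editable configuration.
        return {
            "name": self.name,
            "description": self.description,
            "parameters": {k: v.dump() for k, v in self.parameters.items()},
        }

    @staticmethod
    def parse(schema: dict) -> "CompletionModelFunction":
        return CompletionModelFunction(
            name=schema["name"],
            description=schema["description"],
            parameters=JSONSchema.parse_properties(schema["parameters"]),
        )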

View File

@@ -2,16 +2,14 @@ import logging

 from autogpt.core.configuration import SystemConfiguration, UserConfigurable
 from autogpt.core.prompting import PromptStrategy
-from autogpt.core.prompting.schema import (
-    LanguageModelClassification,
-    LanguageModelPrompt,
-)
+from autogpt.core.prompting.schema import ChatPrompt, LanguageModelClassification
 from autogpt.core.prompting.utils import json_loads
 from autogpt.core.resource.model_providers import (
-    LanguageModelFunction,
-    LanguageModelMessage,
-    MessageRole,
+    AssistantChatMessageDict,
+    ChatMessage,
+    CompletionModelFunction,
 )
+from autogpt.core.utils.json_schema import JSONSchema

 logger = logging.getLogger(__name__)
@@ -53,43 +51,39 @@ class NameAndGoals(PromptStrategy):
     DEFAULT_USER_PROMPT_TEMPLATE = '"""{user_objective}"""'

-    DEFAULT_CREATE_AGENT_FUNCTION = {
-        "name": "create_agent",
-        "description": ("Create a new autonomous AI agent to complete a given task."),
-        "parameters": {
-            "type": "object",
-            "properties": {
-                "agent_name": {
-                    "type": "string",
-                    "description": "A short role-based name for an autonomous agent.",
-                },
-                "agent_role": {
-                    "type": "string",
-                    "description": "An informative one sentence description of what the AI agent does",
-                },
-                "agent_goals": {
-                    "type": "array",
-                    "minItems": 1,
-                    "maxItems": 5,
-                    "items": {
-                        "type": "string",
-                    },
-                    "description": (
-                        "One to five highly effective goals that are optimally aligned with the completion of a "
-                        "specific task. The number and complexity of the goals should correspond to the "
-                        "complexity of the agent's primary objective."
-                    ),
-                },
-            },
-            "required": ["agent_name", "agent_role", "agent_goals"],
-        },
-    }
+    DEFAULT_CREATE_AGENT_FUNCTION = CompletionModelFunction(
+        name="create_agent",
+        description="Create a new autonomous AI agent to complete a given task.",
+        parameters={
+            "agent_name": JSONSchema(
+                type=JSONSchema.Type.STRING,
+                description="A short role-based name for an autonomous agent.",
+            ),
+            "agent_role": JSONSchema(
+                type=JSONSchema.Type.STRING,
+                description="An informative one sentence description of what the AI agent does",
+            ),
+            "agent_goals": JSONSchema(
+                type=JSONSchema.Type.ARRAY,
+                minItems=1,
+                maxItems=5,
+                items=JSONSchema(
+                    type=JSONSchema.Type.STRING,
+                ),
+                description=(
+                    "One to five highly effective goals that are optimally aligned with the completion of a "
+                    "specific task. The number and complexity of the goals should correspond to the "
+                    "complexity of the agent's primary objective."
+                ),
+            ),
+        },
+    )

     default_configuration: NameAndGoalsConfiguration = NameAndGoalsConfiguration(
         model_classification=LanguageModelClassification.SMART_MODEL,
         system_prompt=DEFAULT_SYSTEM_PROMPT,
         user_prompt_template=DEFAULT_USER_PROMPT_TEMPLATE,
-        create_agent_function=DEFAULT_CREATE_AGENT_FUNCTION,
+        create_agent_function=DEFAULT_CREATE_AGENT_FUNCTION.schema,
     )

     def __init__(
@@ -97,34 +91,29 @@ class NameAndGoals(PromptStrategy):
         model_classification: LanguageModelClassification,
         system_prompt: str,
         user_prompt_template: str,
-        create_agent_function: str,
+        create_agent_function: dict,
     ):
         self._model_classification = model_classification
         self._system_prompt_message = system_prompt
         self._user_prompt_template = user_prompt_template
-        self._create_agent_function = create_agent_function
+        self._create_agent_function = CompletionModelFunction.parse(
+            create_agent_function
+        )

     @property
     def model_classification(self) -> LanguageModelClassification:
         return self._model_classification

-    def build_prompt(self, user_objective: str = "", **kwargs) -> LanguageModelPrompt:
-        system_message = LanguageModelMessage(
-            role=MessageRole.SYSTEM,
-            content=self._system_prompt_message,
-        )
-        user_message = LanguageModelMessage(
-            role=MessageRole.USER,
-            content=self._user_prompt_template.format(
+    def build_prompt(self, user_objective: str = "", **kwargs) -> ChatPrompt:
+        system_message = ChatMessage.system(self._system_prompt_message)
+        user_message = ChatMessage.user(
+            self._user_prompt_template.format(
                 user_objective=user_objective,
-            ),
+            )
         )
-        create_agent_function = LanguageModelFunction(
-            json_schema=self._create_agent_function,
-        )
-        prompt = LanguageModelPrompt(
+        prompt = ChatPrompt(
             messages=[system_message, user_message],
-            functions=[create_agent_function],
+            functions=[self._create_agent_function],
             # TODO
             tokens_used=0,
         )
@@ -132,7 +121,7 @@ class NameAndGoals(PromptStrategy):

     def parse_response_content(
         self,
-        response_content: dict,
+        response_content: AssistantChatMessageDict,
     ) -> dict:
         """Parse the actual text response from the objective model.

View File

@@ -1,18 +1,16 @@
 import logging

 from autogpt.core.configuration import SystemConfiguration, UserConfigurable
-from autogpt.core.prompting import PromptStrategy
-from autogpt.core.prompting.schema import (
-    LanguageModelClassification,
-    LanguageModelPrompt,
-)
-from autogpt.core.prompting.utils import json_loads, to_numbered_list
 from autogpt.core.planning.schema import Task
+from autogpt.core.prompting import PromptStrategy
+from autogpt.core.prompting.schema import ChatPrompt, LanguageModelClassification
+from autogpt.core.prompting.utils import json_loads, to_numbered_list
 from autogpt.core.resource.model_providers import (
-    LanguageModelFunction,
-    LanguageModelMessage,
-    MessageRole,
+    AssistantChatMessageDict,
+    ChatMessage,
+    CompletionModelFunction,
 )
+from autogpt.core.utils.json_schema import JSONSchema

 logger = logging.getLogger(__name__)
@@ -51,18 +49,18 @@ class NextAbility(PromptStrategy):
     )

     DEFAULT_ADDITIONAL_ABILITY_ARGUMENTS = {
-        "motivation": {
-            "type": "string",
-            "description": "Your justification for choosing this function instead of a different one.",
-        },
-        "self_criticism": {
-            "type": "string",
-            "description": "Thoughtful self-criticism that explains why this function may not be the best choice.",
-        },
-        "reasoning": {
-            "type": "string",
-            "description": "Your reasoning for choosing this function taking into account the `motivation` and weighing the `self_criticism`.",
-        },
+        "motivation": JSONSchema(
+            type=JSONSchema.Type.STRING,
+            description="Your justification for choosing this function instead of a different one.",
+        ),
+        "self_criticism": JSONSchema(
+            type=JSONSchema.Type.STRING,
+            description="Thoughtful self-criticism that explains why this function may not be the best choice.",
+        ),
+        "reasoning": JSONSchema(
+            type=JSONSchema.Type.STRING,
+            description="Your reasoning for choosing this function taking into account the `motivation` and weighing the `self_criticism`.",
+        ),
     }
@@ -70,7 +68,9 @@ class NextAbility(PromptStrategy):
         system_prompt_template=DEFAULT_SYSTEM_PROMPT_TEMPLATE,
         system_info=DEFAULT_SYSTEM_INFO,
         user_prompt_template=DEFAULT_USER_PROMPT_TEMPLATE,
-        additional_ability_arguments=DEFAULT_ADDITIONAL_ABILITY_ARGUMENTS,
+        additional_ability_arguments={
+            k: v.dump() for k, v in DEFAULT_ADDITIONAL_ABILITY_ARGUMENTS.items()
+        },
     )

     def __init__(
@@ -85,7 +85,11 @@ class NextAbility(PromptStrategy):
         self._system_prompt_template = system_prompt_template
         self._system_info = system_info
         self._user_prompt_template = user_prompt_template
-        self._additional_ability_arguments = additional_ability_arguments
+        self._additional_ability_arguments = JSONSchema.parse_properties(
+            additional_ability_arguments
+        )
+        for p in self._additional_ability_arguments.values():
+            p.required = True

     @property
     def model_classification(self) -> LanguageModelClassification:
@@ -94,12 +98,12 @@ class NextAbility(PromptStrategy):
     def build_prompt(
         self,
         task: Task,
-        ability_schema: list[dict],
+        ability_specs: list[CompletionModelFunction],
         os_info: str,
         api_budget: float,
         current_time: str,
         **kwargs,
-    ) -> LanguageModelPrompt:
+    ) -> ChatPrompt:
         template_kwargs = {
             "os_info": os_info,
             "api_budget": api_budget,
@@ -107,13 +111,8 @@ class NextAbility(PromptStrategy):
             **kwargs,
         }

-        for ability in ability_schema:
-            ability["parameters"]["properties"].update(
-                self._additional_ability_arguments
-            )
-            ability["parameters"]["required"] += list(
-                self._additional_ability_arguments.keys()
-            )
+        for ability in ability_specs:
+            ability.parameters.update(self._additional_ability_arguments)

         template_kwargs["task_objective"] = task.objective
         template_kwargs["cycle_count"] = task.context.cycle_count
@@ -143,28 +142,23 @@ class NextAbility(PromptStrategy):
             **template_kwargs,
         )

-        system_prompt = LanguageModelMessage(
-            role=MessageRole.SYSTEM,
-            content=self._system_prompt_template.format(**template_kwargs),
+        system_prompt = ChatMessage.system(
+            self._system_prompt_template.format(**template_kwargs)
         )
-        user_prompt = LanguageModelMessage(
-            role=MessageRole.USER,
-            content=self._user_prompt_template.format(**template_kwargs),
+        user_prompt = ChatMessage.user(
+            self._user_prompt_template.format(**template_kwargs)
         )
-        functions = [
-            LanguageModelFunction(json_schema=ability) for ability in ability_schema
-        ]

-        return LanguageModelPrompt(
+        return ChatPrompt(
             messages=[system_prompt, user_prompt],
-            functions=functions,
+            functions=ability_specs,
             # TODO:
             tokens_used=0,
         )

     def parse_response_content(
         self,
-        response_content: dict,
+        response_content: AssistantChatMessageDict,
     ) -> dict:
         """Parse the actual text response from the objective model.
@@ -177,7 +171,9 @@ class NextAbility(PromptStrategy):
         """
         try:
             function_name = response_content["function_call"]["name"]
-            function_arguments = json_loads(response_content["function_call"]["arguments"])
+            function_arguments = json_loads(
+                response_content["function_call"]["arguments"]
+            )
             parsed_response = {
                 "motivation": function_arguments.pop("motivation"),
                 "self_criticism": function_arguments.pop("self_criticism"),

View File

@@ -15,8 +15,9 @@ from autogpt.core.planning.schema import Task
 from autogpt.core.prompting import PromptStrategy
 from autogpt.core.prompting.schema import LanguageModelClassification
 from autogpt.core.resource.model_providers import (
-    LanguageModelProvider,
-    LanguageModelResponse,
+    ChatModelProvider,
+    ChatModelResponse,
+    CompletionModelFunction,
     ModelProviderName,
     OpenAIModelName,
 )
@@ -82,14 +83,14 @@ class SimplePlanner(Configurable):
         self,
         settings: PlannerSettings,
         logger: logging.Logger,
-        model_providers: dict[ModelProviderName, LanguageModelProvider],
+        model_providers: dict[ModelProviderName, ChatModelProvider],
         workspace: Workspace = None,  # Workspace is not available during bootstrapping.
     ) -> None:
         self._configuration = settings.configuration
         self._logger = logger
         self._workspace = workspace

-        self._providers: dict[LanguageModelClassification, LanguageModelProvider] = {}
+        self._providers: dict[LanguageModelClassification, ChatModelProvider] = {}
         for model, model_config in self._configuration.models.items():
             self._providers[model] = model_providers[model_config.provider_name]
@@ -105,7 +106,7 @@ class SimplePlanner(Configurable):
         ),
     }

-    async def decide_name_and_goals(self, user_objective: str) -> LanguageModelResponse:
+    async def decide_name_and_goals(self, user_objective: str) -> ChatModelResponse:
         return await self.chat_with_model(
             self._prompt_strategies["name_and_goals"],
             user_objective=user_objective,
@@ -117,7 +118,7 @@ class SimplePlanner(Configurable):
         agent_role: str,
         agent_goals: list[str],
         abilities: list[str],
-    ) -> LanguageModelResponse:
+    ) -> ChatModelResponse:
         return await self.chat_with_model(
             self._prompt_strategies["initial_plan"],
             agent_name=agent_name,
@@ -129,19 +130,19 @@ class SimplePlanner(Configurable):
     async def determine_next_ability(
         self,
         task: Task,
-        ability_schema: list[dict],
+        ability_specs: list[CompletionModelFunction],
     ):
         return await self.chat_with_model(
             self._prompt_strategies["next_ability"],
             task=task,
-            ability_schema=ability_schema,
+            ability_specs=ability_specs,
         )

     async def chat_with_model(
         self,
         prompt_strategy: PromptStrategy,
         **kwargs,
-    ) -> LanguageModelResponse:
+    ) -> ChatModelResponse:
         model_classification = prompt_strategy.model_classification
         model_configuration = self._configuration.models[model_classification].dict()
         self._logger.debug(f"Using model configuration: {model_configuration}")
@@ -153,13 +154,13 @@ class SimplePlanner(Configurable):
         prompt = prompt_strategy.build_prompt(**template_kwargs)

         self._logger.debug(f"Using prompt:\n{dump_prompt(prompt)}\n")
-        response = await provider.create_language_completion(
+        response = await provider.create_chat_completion(
             model_prompt=prompt.messages,
             functions=prompt.functions,
             **model_configuration,
             completion_parser=prompt_strategy.parse_response_content,
         )
-        return LanguageModelResponse.parse_obj(response.dict())
+        return response

     def _make_template_kwargs_for_strategy(self, strategy: PromptStrategy):
         provider = self._providers[strategy.model_classification]
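
The token-counting methods named in the commit message ("Add token counting methods to `ModelProvider` interface and implementations") do not appear in this excerpt. For OpenAI models they would typically be built on tiktoken; a sketch of the assumed shape, where the method name and signature are an assumption rather than taken from the diff:

import abc

import tiktoken


class ChatModelProvider(abc.ABC):  # assumed interface addition
    @abc.abstractmethod
    def count_tokens(self, text: str, model_name: str) -> int:
        ...


class OpenAIProvider(ChatModelProvider):
    def count_tokens(self, text: str, model_name: str) -> int:
        # tiktoken resolves the tokenizer encoding for a given model name.
        encoding = tiktoken.encoding_for_model(model_name)
        return len(encoding.encode(text))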

View File

@@ -10,15 +10,15 @@ if TYPE_CHECKING:
     from autogpt.core.ability import Ability, AbilityRegistry
     from autogpt.core.memory import Memory
     from autogpt.core.resource.model_providers import (
+        ChatModelProvider,
         EmbeddingModelProvider,
-        LanguageModelProvider,
     )

 # Expand to other types as needed
 PluginType = (
     Type[Ability]  # Swappable now
     | Type[AbilityRegistry]  # Swappable maybe never
-    | Type[LanguageModelProvider]  # Swappable soon
+    | Type[ChatModelProvider]  # Swappable soon
     | Type[EmbeddingModelProvider]  # Swappable soon
     | Type[Memory]  # Swappable now
     # | Type[Planner]  # Swappable soon

View File

@@ -543,6 +543,41 @@ files = [
{file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"},
]
[[package]]
name = "jsonschema"
version = "4.19.1"
description = "An implementation of JSON Schema validation for Python"
optional = false
python-versions = ">=3.8"
files = [
{file = "jsonschema-4.19.1-py3-none-any.whl", hash = "sha256:cd5f1f9ed9444e554b38ba003af06c0a8c2868131e56bfbef0550fb450c0330e"},
{file = "jsonschema-4.19.1.tar.gz", hash = "sha256:ec84cc37cfa703ef7cd4928db24f9cb31428a5d0fa77747b8b51a847458e0bbf"},
]
[package.dependencies]
attrs = ">=22.2.0"
jsonschema-specifications = ">=2023.03.6"
referencing = ">=0.28.4"
rpds-py = ">=0.7.1"
[package.extras]
format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"]
format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"]
[[package]]
name = "jsonschema-specifications"
version = "2023.7.1"
description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry"
optional = false
python-versions = ">=3.8"
files = [
{file = "jsonschema_specifications-2023.7.1-py3-none-any.whl", hash = "sha256:05adf340b659828a004220a9613be00fa3f223f2b82002e273dee62fd50524b1"},
{file = "jsonschema_specifications-2023.7.1.tar.gz", hash = "sha256:c91a50404e88a1f6ba40636778e2ee08f6e24c5613fe4c53ac24578a5a7f72bb"},
]
[package.dependencies]
referencing = ">=0.28.0"
[[package]]
name = "multidict"
version = "6.0.4"

@@ -832,6 +867,21 @@ files = [
{file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"},
]
[[package]]
name = "referencing"
version = "0.30.2"
description = "JSON Referencing + Python"
optional = false
python-versions = ">=3.8"
files = [
{file = "referencing-0.30.2-py3-none-any.whl", hash = "sha256:449b6669b6121a9e96a7f9e410b245d471e8d48964c67113ce9afe50c8dd7bdf"},
{file = "referencing-0.30.2.tar.gz", hash = "sha256:794ad8003c65938edcdbc027f1933215e0d0ccc0291e3ce20a4d87432b59efc0"},
]
[package.dependencies]
attrs = ">=22.2.0"
rpds-py = ">=0.7.0"
[[package]]
name = "regex"
version = "2023.8.8"

@@ -950,6 +1000,112 @@ urllib3 = ">=1.21.1,<3"
socks = ["PySocks (>=1.5.6,!=1.5.7)"]
use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]
[[package]]
name = "rpds-py"
version = "0.10.3"
description = "Python bindings to Rust's persistent data structures (rpds)"
optional = false
python-versions = ">=3.8"
files = [
{file = "rpds_py-0.10.3-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:485747ee62da83366a44fbba963c5fe017860ad408ccd6cd99aa66ea80d32b2e"},
{file = "rpds_py-0.10.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c55f9821f88e8bee4b7a72c82cfb5ecd22b6aad04033334f33c329b29bfa4da0"},
{file = "rpds_py-0.10.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3b52a67ac66a3a64a7e710ba629f62d1e26ca0504c29ee8cbd99b97df7079a8"},
{file = "rpds_py-0.10.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3aed39db2f0ace76faa94f465d4234aac72e2f32b009f15da6492a561b3bbebd"},
{file = "rpds_py-0.10.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:271c360fdc464fe6a75f13ea0c08ddf71a321f4c55fc20a3fe62ea3ef09df7d9"},
{file = "rpds_py-0.10.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ef5fddfb264e89c435be4adb3953cef5d2936fdeb4463b4161a6ba2f22e7b740"},
{file = "rpds_py-0.10.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a771417c9c06c56c9d53d11a5b084d1de75de82978e23c544270ab25e7c066ff"},
{file = "rpds_py-0.10.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:52b5cbc0469328e58180021138207e6ec91d7ca2e037d3549cc9e34e2187330a"},
{file = "rpds_py-0.10.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:6ac3fefb0d168c7c6cab24fdfc80ec62cd2b4dfd9e65b84bdceb1cb01d385c33"},
{file = "rpds_py-0.10.3-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:8d54bbdf5d56e2c8cf81a1857250f3ea132de77af543d0ba5dce667183b61fec"},
{file = "rpds_py-0.10.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cd2163f42868865597d89399a01aa33b7594ce8e2c4a28503127c81a2f17784e"},
{file = "rpds_py-0.10.3-cp310-none-win32.whl", hash = "sha256:ea93163472db26ac6043e8f7f93a05d9b59e0505c760da2a3cd22c7dd7111391"},
{file = "rpds_py-0.10.3-cp310-none-win_amd64.whl", hash = "sha256:7cd020b1fb41e3ab7716d4d2c3972d4588fdfbab9bfbbb64acc7078eccef8860"},
{file = "rpds_py-0.10.3-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:1d9b5ee46dcb498fa3e46d4dfabcb531e1f2e76b477e0d99ef114f17bbd38453"},
{file = "rpds_py-0.10.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:563646d74a4b4456d0cf3b714ca522e725243c603e8254ad85c3b59b7c0c4bf0"},
{file = "rpds_py-0.10.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e626b864725680cd3904414d72e7b0bd81c0e5b2b53a5b30b4273034253bb41f"},
{file = "rpds_py-0.10.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:485301ee56ce87a51ccb182a4b180d852c5cb2b3cb3a82f7d4714b4141119d8c"},
{file = "rpds_py-0.10.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:42f712b4668831c0cd85e0a5b5a308700fe068e37dcd24c0062904c4e372b093"},
{file = "rpds_py-0.10.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6c9141af27a4e5819d74d67d227d5047a20fa3c7d4d9df43037a955b4c748ec5"},
{file = "rpds_py-0.10.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ef750a20de1b65657a1425f77c525b0183eac63fe7b8f5ac0dd16f3668d3e64f"},
{file = "rpds_py-0.10.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e1a0ffc39f51aa5f5c22114a8f1906b3c17eba68c5babb86c5f77d8b1bba14d1"},
{file = "rpds_py-0.10.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:f4c179a7aeae10ddf44c6bac87938134c1379c49c884529f090f9bf05566c836"},
{file = "rpds_py-0.10.3-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:176287bb998fd1e9846a9b666e240e58f8d3373e3bf87e7642f15af5405187b8"},
{file = "rpds_py-0.10.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6446002739ca29249f0beaaf067fcbc2b5aab4bc7ee8fb941bd194947ce19aff"},
{file = "rpds_py-0.10.3-cp311-none-win32.whl", hash = "sha256:c7aed97f2e676561416c927b063802c8a6285e9b55e1b83213dfd99a8f4f9e48"},
{file = "rpds_py-0.10.3-cp311-none-win_amd64.whl", hash = "sha256:8bd01ff4032abaed03f2db702fa9a61078bee37add0bd884a6190b05e63b028c"},
{file = "rpds_py-0.10.3-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:4cf0855a842c5b5c391dd32ca273b09e86abf8367572073bd1edfc52bc44446b"},
{file = "rpds_py-0.10.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:69b857a7d8bd4f5d6e0db4086da8c46309a26e8cefdfc778c0c5cc17d4b11e08"},
{file = "rpds_py-0.10.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:975382d9aa90dc59253d6a83a5ca72e07f4ada3ae3d6c0575ced513db322b8ec"},
{file = "rpds_py-0.10.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:35fbd23c1c8732cde7a94abe7fb071ec173c2f58c0bd0d7e5b669fdfc80a2c7b"},
{file = "rpds_py-0.10.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:106af1653007cc569d5fbb5f08c6648a49fe4de74c2df814e234e282ebc06957"},
{file = "rpds_py-0.10.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ce5e7504db95b76fc89055c7f41e367eaadef5b1d059e27e1d6eabf2b55ca314"},
{file = "rpds_py-0.10.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5aca759ada6b1967fcfd4336dcf460d02a8a23e6abe06e90ea7881e5c22c4de6"},
{file = "rpds_py-0.10.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b5d4bdd697195f3876d134101c40c7d06d46c6ab25159ed5cbd44105c715278a"},
{file = "rpds_py-0.10.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a657250807b6efd19b28f5922520ae002a54cb43c2401e6f3d0230c352564d25"},
{file = "rpds_py-0.10.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:177c9dd834cdf4dc39c27436ade6fdf9fe81484758885f2d616d5d03c0a83bd2"},
{file = "rpds_py-0.10.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e22491d25f97199fc3581ad8dd8ce198d8c8fdb8dae80dea3512e1ce6d5fa99f"},
{file = "rpds_py-0.10.3-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:2f3e1867dd574014253b4b8f01ba443b9c914e61d45f3674e452a915d6e929a3"},
{file = "rpds_py-0.10.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c22211c165166de6683de8136229721f3d5c8606cc2c3d1562da9a3a5058049c"},
{file = "rpds_py-0.10.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40bc802a696887b14c002edd43c18082cb7b6f9ee8b838239b03b56574d97f71"},
{file = "rpds_py-0.10.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e271dd97c7bb8eefda5cca38cd0b0373a1fea50f71e8071376b46968582af9b"},
{file = "rpds_py-0.10.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:95cde244e7195b2c07ec9b73fa4c5026d4a27233451485caa1cd0c1b55f26dbd"},
{file = "rpds_py-0.10.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08a80cf4884920863623a9ee9a285ee04cef57ebedc1cc87b3e3e0f24c8acfe5"},
{file = "rpds_py-0.10.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:763ad59e105fca09705d9f9b29ecffb95ecdc3b0363be3bb56081b2c6de7977a"},
{file = "rpds_py-0.10.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:187700668c018a7e76e89424b7c1042f317c8df9161f00c0c903c82b0a8cac5c"},
{file = "rpds_py-0.10.3-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:5267cfda873ad62591b9332fd9472d2409f7cf02a34a9c9cb367e2c0255994bf"},
{file = "rpds_py-0.10.3-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:2ed83d53a8c5902ec48b90b2ac045e28e1698c0bea9441af9409fc844dc79496"},
{file = "rpds_py-0.10.3-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:255f1a10ae39b52122cce26ce0781f7a616f502feecce9e616976f6a87992d6b"},
{file = "rpds_py-0.10.3-cp38-none-win32.whl", hash = "sha256:a019a344312d0b1f429c00d49c3be62fa273d4a1094e1b224f403716b6d03be1"},
{file = "rpds_py-0.10.3-cp38-none-win_amd64.whl", hash = "sha256:efb9ece97e696bb56e31166a9dd7919f8f0c6b31967b454718c6509f29ef6fee"},
{file = "rpds_py-0.10.3-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:570cc326e78ff23dec7f41487aa9c3dffd02e5ee9ab43a8f6ccc3df8f9327623"},
{file = "rpds_py-0.10.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cff7351c251c7546407827b6a37bcef6416304fc54d12d44dbfecbb717064717"},
{file = "rpds_py-0.10.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:177914f81f66c86c012311f8c7f46887ec375cfcfd2a2f28233a3053ac93a569"},
{file = "rpds_py-0.10.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:448a66b8266de0b581246ca7cd6a73b8d98d15100fb7165974535fa3b577340e"},
{file = "rpds_py-0.10.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3bbac1953c17252f9cc675bb19372444aadf0179b5df575ac4b56faaec9f6294"},
{file = "rpds_py-0.10.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9dd9d9d9e898b9d30683bdd2b6c1849449158647d1049a125879cb397ee9cd12"},
{file = "rpds_py-0.10.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8c71ea77536149e36c4c784f6d420ffd20bea041e3ba21ed021cb40ce58e2c9"},
{file = "rpds_py-0.10.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:16a472300bc6c83fe4c2072cc22b3972f90d718d56f241adabc7ae509f53f154"},
{file = "rpds_py-0.10.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:b9255e7165083de7c1d605e818025e8860636348f34a79d84ec533546064f07e"},
{file = "rpds_py-0.10.3-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:53d7a3cd46cdc1689296348cb05ffd4f4280035770aee0c8ead3bbd4d6529acc"},
{file = "rpds_py-0.10.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:22da15b902f9f8e267020d1c8bcfc4831ca646fecb60254f7bc71763569f56b1"},
{file = "rpds_py-0.10.3-cp39-none-win32.whl", hash = "sha256:850c272e0e0d1a5c5d73b1b7871b0a7c2446b304cec55ccdb3eaac0d792bb065"},
{file = "rpds_py-0.10.3-cp39-none-win_amd64.whl", hash = "sha256:de61e424062173b4f70eec07e12469edde7e17fa180019a2a0d75c13a5c5dc57"},
{file = "rpds_py-0.10.3-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:af247fd4f12cca4129c1b82090244ea5a9d5bb089e9a82feb5a2f7c6a9fe181d"},
{file = "rpds_py-0.10.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:3ad59efe24a4d54c2742929001f2d02803aafc15d6d781c21379e3f7f66ec842"},
{file = "rpds_py-0.10.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:642ed0a209ced4be3a46f8cb094f2d76f1f479e2a1ceca6de6346a096cd3409d"},
{file = "rpds_py-0.10.3-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:37d0c59548ae56fae01c14998918d04ee0d5d3277363c10208eef8c4e2b68ed6"},
{file = "rpds_py-0.10.3-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aad6ed9e70ddfb34d849b761fb243be58c735be6a9265b9060d6ddb77751e3e8"},
{file = "rpds_py-0.10.3-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8f94fdd756ba1f79f988855d948ae0bad9ddf44df296770d9a58c774cfbcca72"},
{file = "rpds_py-0.10.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77076bdc8776a2b029e1e6ffbe6d7056e35f56f5e80d9dc0bad26ad4a024a762"},
{file = "rpds_py-0.10.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:87d9b206b1bd7a0523375dc2020a6ce88bca5330682ae2fe25e86fd5d45cea9c"},
{file = "rpds_py-0.10.3-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:8efaeb08ede95066da3a3e3c420fcc0a21693fcd0c4396d0585b019613d28515"},
{file = "rpds_py-0.10.3-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:a4d9bfda3f84fc563868fe25ca160c8ff0e69bc4443c5647f960d59400ce6557"},
{file = "rpds_py-0.10.3-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:d27aa6bbc1f33be920bb7adbb95581452cdf23005d5611b29a12bb6a3468cc95"},
{file = "rpds_py-0.10.3-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:ed8313809571a5463fd7db43aaca68ecb43ca7a58f5b23b6e6c6c5d02bdc7882"},
{file = "rpds_py-0.10.3-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:e10e6a1ed2b8661201e79dff5531f8ad4cdd83548a0f81c95cf79b3184b20c33"},
{file = "rpds_py-0.10.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:015de2ce2af1586ff5dc873e804434185199a15f7d96920ce67e50604592cae9"},
{file = "rpds_py-0.10.3-pp38-pypy38_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ae87137951bb3dc08c7d8bfb8988d8c119f3230731b08a71146e84aaa919a7a9"},
{file = "rpds_py-0.10.3-pp38-pypy38_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0bb4f48bd0dd18eebe826395e6a48b7331291078a879295bae4e5d053be50d4c"},
{file = "rpds_py-0.10.3-pp38-pypy38_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:09362f86ec201288d5687d1dc476b07bf39c08478cde837cb710b302864e7ec9"},
{file = "rpds_py-0.10.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:821392559d37759caa67d622d0d2994c7a3f2fb29274948ac799d496d92bca73"},
{file = "rpds_py-0.10.3-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7170cbde4070dc3c77dec82abf86f3b210633d4f89550fa0ad2d4b549a05572a"},
{file = "rpds_py-0.10.3-pp38-pypy38_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:5de11c041486681ce854c814844f4ce3282b6ea1656faae19208ebe09d31c5b8"},
{file = "rpds_py-0.10.3-pp38-pypy38_pp73-musllinux_1_2_i686.whl", hash = "sha256:4ed172d0c79f156c1b954e99c03bc2e3033c17efce8dd1a7c781bc4d5793dfac"},
{file = "rpds_py-0.10.3-pp38-pypy38_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:11fdd1192240dda8d6c5d18a06146e9045cb7e3ba7c06de6973000ff035df7c6"},
{file = "rpds_py-0.10.3-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:f602881d80ee4228a2355c68da6b296a296cd22bbb91e5418d54577bbf17fa7c"},
{file = "rpds_py-0.10.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:691d50c99a937709ac4c4cd570d959a006bd6a6d970a484c84cc99543d4a5bbb"},
{file = "rpds_py-0.10.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:24cd91a03543a0f8d09cb18d1cb27df80a84b5553d2bd94cba5979ef6af5c6e7"},
{file = "rpds_py-0.10.3-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fc2200e79d75b5238c8d69f6a30f8284290c777039d331e7340b6c17cad24a5a"},
{file = "rpds_py-0.10.3-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ea65b59882d5fa8c74a23f8960db579e5e341534934f43f3b18ec1839b893e41"},
{file = "rpds_py-0.10.3-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:829e91f3a8574888b73e7a3feb3b1af698e717513597e23136ff4eba0bc8387a"},
{file = "rpds_py-0.10.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eab75a8569a095f2ad470b342f2751d9902f7944704f0571c8af46bede438475"},
{file = "rpds_py-0.10.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:061c3ff1f51ecec256e916cf71cc01f9975af8fb3af9b94d3c0cc8702cfea637"},
{file = "rpds_py-0.10.3-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:39d05e65f23a0fe897b6ac395f2a8d48c56ac0f583f5d663e0afec1da89b95da"},
{file = "rpds_py-0.10.3-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:4eca20917a06d2fca7628ef3c8b94a8c358f6b43f1a621c9815243462dcccf97"},
{file = "rpds_py-0.10.3-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:e8d0f0eca087630d58b8c662085529781fd5dc80f0a54eda42d5c9029f812599"},
{file = "rpds_py-0.10.3.tar.gz", hash = "sha256:fcc1ebb7561a3e24a6588f7c6ded15d80aec22c66a070c757559b57b17ffd1cb"},
]
 [[package]]
 name = "sniffio"
 version = "1.3.0"
@@ -1056,24 +1212,24 @@ telegram = ["requests"]
 [[package]]
 name = "typing-extensions"
-version = "4.7.1"
-description = "Backported and Experimental Type Hints for Python 3.7+"
+version = "4.8.0"
+description = "Backported and Experimental Type Hints for Python 3.8+"
 optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
 files = [
-    {file = "typing_extensions-4.7.1-py3-none-any.whl", hash = "sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36"},
-    {file = "typing_extensions-4.7.1.tar.gz", hash = "sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2"},
+    {file = "typing_extensions-4.8.0-py3-none-any.whl", hash = "sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0"},
+    {file = "typing_extensions-4.8.0.tar.gz", hash = "sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef"},
 ]

 [[package]]
 name = "urllib3"
-version = "2.0.4"
+version = "2.0.5"
 description = "HTTP library with thread-safe connection pooling, file post, and more."
 optional = false
 python-versions = ">=3.7"
 files = [
-    {file = "urllib3-2.0.4-py3-none-any.whl", hash = "sha256:de7df1803967d2c2a98e4b11bb7d6bd9210474c46e8a0401514e3a42a75ebde4"},
-    {file = "urllib3-2.0.4.tar.gz", hash = "sha256:8d22f86aae8ef5e410d4f539fde9ce6b2113a001bb4d189e0aed70642d602b11"},
+    {file = "urllib3-2.0.5-py3-none-any.whl", hash = "sha256:ef16afa8ba34a1f989db38e1dbbe0c302e4289a47856990d0682e374563ce35e"},
+    {file = "urllib3-2.0.5.tar.gz", hash = "sha256:13abf37382ea2ce6fb744d4dad67838eec857c9f4f57009891805e0b5e123594"},
 ]

 [package.extras]
@@ -1186,4 +1342,4 @@ multidict = ">=4.0"
 [metadata]
 lock-version = "2.0"
 python-versions = "^3.10"
-content-hash = "50ad53581d2716ee6927df7200b2522acfaad35aadc76909bdab4073c49a824e"
+content-hash = "e5acc4decd67692ad0f08e38d380e1a474ef480449b78dd14321dccf1ad3ca5a"
View File
@@ -1,8 +1,8 @@
 from .base import PromptStrategy
-from .schema import LanguageModelClassification, LanguageModelPrompt
+from .schema import ChatPrompt, LanguageModelClassification

 __all__ = [
     "LanguageModelClassification",
-    "LanguageModelPrompt",
+    "ChatPrompt",
     "PromptStrategy",
 ]
View File
@@ -2,12 +2,9 @@ import abc
 from typing import Generic, TypeVar

 from autogpt.core.configuration import SystemConfiguration
+from autogpt.core.resource.model_providers import AssistantChatMessageDict

-from .schema import (
-    LanguageModelClassification,
-    LanguageModelPrompt,
-)
+from .schema import ChatPrompt, LanguageModelClassification

 IN = TypeVar("IN", bound=dict)
 OUT = TypeVar("OUT")
@@ -22,9 +19,9 @@ class PromptStrategy(abc.ABC, Generic[IN, OUT]):
         ...

     @abc.abstractmethod
-    def build_prompt(self, *_, **kwargs: IN) -> LanguageModelPrompt:
+    def build_prompt(self, *_, **kwargs: IN) -> ChatPrompt:
         ...

     @abc.abstractmethod
-    def parse_response_content(self, response_content: dict) -> OUT:
+    def parse_response_content(self, response_content: AssistantChatMessageDict) -> OUT:
         ...
View File
@@ -3,8 +3,9 @@ import enum
 from pydantic import BaseModel, Field

 from autogpt.core.resource.model_providers.schema import (
-    LanguageModelFunction,
-    LanguageModelMessage,
+    ChatMessage,
+    ChatMessageDict,
+    CompletionModelFunction,
 )
@@ -20,12 +21,14 @@ class LanguageModelClassification(str, enum.Enum):
     SMART_MODEL = "smart_model"


-class LanguageModelPrompt(BaseModel):
-    messages: list[LanguageModelMessage]
-    functions: list[LanguageModelFunction] = Field(default_factory=list)
+class ChatPrompt(BaseModel):
+    messages: list[ChatMessage]
+    functions: list[CompletionModelFunction] = Field(default_factory=list)
+
+    def raw(self) -> list[ChatMessageDict]:
+        return [m.dict() for m in self.messages]

     def __str__(self):
         return "\n\n".join(
-            f"{m.role.value.upper()}: {m.content}"
-            for m in self.messages
+            f"{m.role.value.upper()}: {m.content}" for m in self.messages
         )
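For illustration, a minimal sketch of how the reworked ChatPrompt is meant to be used, relying on the ChatMessage factory helpers defined in model_providers.schema further down in this diff:

    from autogpt.core.prompting import ChatPrompt
    from autogpt.core.resource.model_providers import ChatMessage

    prompt = ChatPrompt(
        messages=[
            ChatMessage.system("You are a concise assistant."),
            ChatMessage.user("Summarize this changeset in one sentence."),
        ]
    )
    prompt.raw()  # plain ChatMessageDict dicts, ready for the chat API
    str(prompt)   # "SYSTEM: You are a concise assistant.\n\nUSER: Summarize ..."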
View File
@@ -25,6 +25,7 @@ click = "^8.1.7"
 colorama = "^0.4.6"
 distro = "^1.8.0"
 inflection = "^0.5.1"
+jsonschema = "^4.19.1"
 openai = "^0.28.0"
 pydantic = "^1.10.12"
 pyyaml = "^6.0.0"
View File
@@ -1,20 +1,25 @@
-from autogpt.core.resource.model_providers.openai import (
+from .openai import (
+    OPEN_AI_CHAT_MODELS,
+    OPEN_AI_EMBEDDING_MODELS,
     OPEN_AI_MODELS,
     OpenAIModelName,
     OpenAIProvider,
     OpenAISettings,
 )
-from autogpt.core.resource.model_providers.schema import (
+from .schema import (
+    AssistantChatMessage,
+    AssistantChatMessageDict,
+    AssistantFunctionCall,
+    AssistantFunctionCallDict,
+    ChatMessage,
+    ChatModelInfo,
+    ChatModelProvider,
+    ChatModelResponse,
+    CompletionModelFunction,
     Embedding,
     EmbeddingModelInfo,
     EmbeddingModelProvider,
     EmbeddingModelResponse,
-    LanguageModelFunction,
-    LanguageModelInfo,
-    LanguageModelMessage,
-    LanguageModelProvider,
-    LanguageModelResponse,
-    MessageRole,
     ModelInfo,
     ModelProvider,
     ModelProviderBudget,
@@ -24,19 +29,23 @@ from autogpt.core.resource.model_providers.schema import (
     ModelProviderSettings,
     ModelProviderUsage,
     ModelResponse,
+    ModelTokenizer,
 )

 __all__ = [
+    "AssistantChatMessage",
+    "AssistantChatMessageDict",
+    "AssistantFunctionCall",
+    "AssistantFunctionCallDict",
+    "ChatMessage",
+    "ChatModelInfo",
+    "ChatModelProvider",
+    "ChatModelResponse",
+    "CompletionModelFunction",
     "Embedding",
     "EmbeddingModelInfo",
     "EmbeddingModelProvider",
     "EmbeddingModelResponse",
-    "LanguageModelFunction",
-    "LanguageModelInfo",
-    "LanguageModelMessage",
-    "LanguageModelProvider",
-    "LanguageModelResponse",
-    "MessageRole",
     "ModelInfo",
     "ModelProvider",
     "ModelProviderBudget",
@@ -46,7 +55,10 @@ __all__ = [
     "ModelProviderSettings",
     "ModelProviderUsage",
     "ModelResponse",
+    "ModelTokenizer",
     "OPEN_AI_MODELS",
+    "OPEN_AI_CHAT_MODELS",
+    "OPEN_AI_EMBEDDING_MODELS",
     "OpenAIModelName",
     "OpenAIProvider",
     "OpenAISettings",
View File
@@ -15,23 +15,28 @@ from autogpt.core.configuration import (
     UserConfigurable,
 )
 from autogpt.core.resource.model_providers.schema import (
+    AssistantChatMessageDict,
+    ChatMessage,
+    ChatModelInfo,
+    ChatModelProvider,
+    ChatModelResponse,
+    CompletionModelFunction,
     Embedding,
-    EmbeddingModelProvider,
     EmbeddingModelInfo,
+    EmbeddingModelProvider,
     EmbeddingModelResponse,
-    LanguageModelFunction,
-    LanguageModelMessage,
-    LanguageModelProvider,
-    LanguageModelInfo,
-    LanguageModelResponse,
     ModelProviderBudget,
     ModelProviderCredentials,
     ModelProviderName,
     ModelProviderService,
     ModelProviderSettings,
     ModelProviderUsage,
+    ModelTokenizer,
 )

+_T = TypeVar("_T")
+_P = ParamSpec("_P")

 OpenAIEmbeddingParser = Callable[[Embedding], Embedding]
 OpenAIChatParser = Callable[[str], dict]
@@ -69,40 +74,44 @@ OPEN_AI_EMBEDDING_MODELS = {
 }

-OPEN_AI_LANGUAGE_MODELS = {
+OPEN_AI_CHAT_MODELS = {
     info.name: info
     for info in [
-        LanguageModelInfo(
+        ChatModelInfo(
             name=OpenAIModelName.GPT3,
-            service=ModelProviderService.LANGUAGE,
+            service=ModelProviderService.CHAT,
             provider_name=ModelProviderName.OPENAI,
             prompt_token_cost=0.0015 / 1000,
             completion_token_cost=0.002 / 1000,
             max_tokens=4096,
+            has_function_call_api=True,
         ),
-        LanguageModelInfo(
+        ChatModelInfo(
             name=OpenAIModelName.GPT3_16k,
-            service=ModelProviderService.LANGUAGE,
+            service=ModelProviderService.CHAT,
             provider_name=ModelProviderName.OPENAI,
             prompt_token_cost=0.003 / 1000,
             completion_token_cost=0.004 / 1000,
             max_tokens=16384,
+            has_function_call_api=True,
         ),
-        LanguageModelInfo(
+        ChatModelInfo(
             name=OpenAIModelName.GPT4,
-            service=ModelProviderService.LANGUAGE,
+            service=ModelProviderService.CHAT,
             provider_name=ModelProviderName.OPENAI,
             prompt_token_cost=0.03 / 1000,
             completion_token_cost=0.06 / 1000,
-            max_tokens=8192,
+            max_tokens=8191,
+            has_function_call_api=True,
         ),
-        LanguageModelInfo(
+        ChatModelInfo(
             name=OpenAIModelName.GPT4_32k,
-            service=ModelProviderService.LANGUAGE,
+            service=ModelProviderService.CHAT,
             provider_name=ModelProviderName.OPENAI,
             prompt_token_cost=0.06 / 1000,
             completion_token_cost=0.12 / 1000,
             max_tokens=32768,
+            has_function_call_api=True,
         ),
     ]
 }
@@ -111,17 +120,22 @@ chat_model_mapping = {
     OpenAIModelName.GPT3: [OpenAIModelName.GPT3_v1, OpenAIModelName.GPT3_v2],
     OpenAIModelName.GPT3_16k: [OpenAIModelName.GPT3_v2_16k],
     OpenAIModelName.GPT4: [OpenAIModelName.GPT4_v1, OpenAIModelName.GPT4_v2],
-    OpenAIModelName.GPT4_32k: [OpenAIModelName.GPT4_v1_32k, OpenAIModelName.GPT4_v2_32k],
+    OpenAIModelName.GPT4_32k: [
+        OpenAIModelName.GPT4_v1_32k,
+        OpenAIModelName.GPT4_v2_32k,
+    ],
 }
 for base, copies in chat_model_mapping.items():
     for copy in copies:
-        copy_info = LanguageModelInfo(**OPEN_AI_LANGUAGE_MODELS[base].__dict__)
+        copy_info = ChatModelInfo(**OPEN_AI_CHAT_MODELS[base].__dict__)
         copy_info.name = copy
-        OPEN_AI_LANGUAGE_MODELS[copy] = copy_info
+        OPEN_AI_CHAT_MODELS[copy] = copy_info
+        if copy.endswith(("-0301", "-0314")):
+            copy_info.has_function_call_api = False

 OPEN_AI_MODELS = {
-    **OPEN_AI_LANGUAGE_MODELS,
+    **OPEN_AI_CHAT_MODELS,
     **OPEN_AI_EMBEDDING_MODELS,
 }
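The snapshot loop above registers each dated model variant as a copy of its base ChatModelInfo and switches has_function_call_api off for the pre-function-calling snapshots. A hedged sketch of the effect, assuming OpenAIModelName.GPT4_v1 resolves to a model name ending in "-0314":

    OPEN_AI_CHAT_MODELS[OpenAIModelName.GPT4].has_function_call_api     # True
    OPEN_AI_CHAT_MODELS[OpenAIModelName.GPT4_v1].has_function_call_api  # False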
@@ -141,7 +155,9 @@ class OpenAISettings(ModelProviderSettings):
     budget: OpenAIModelProviderBudget


-class OpenAIProvider(Configurable, LanguageModelProvider, EmbeddingModelProvider):
+class OpenAIProvider(
+    Configurable[OpenAISettings], ChatModelProvider, EmbeddingModelProvider
+):
     default_settings = OpenAISettings(
         name="openai_provider",
         description="Provides access to OpenAI's API.",
@@ -163,8 +179,6 @@ class OpenAIProvider(Configurable, LanguageModelProvider, EmbeddingModelProvider
         ),
     )

-    logger = logging.getLogger("model_providers.OpenAIProvider")
-
     def __init__(
         self,
         settings: OpenAISettings,
@@ -181,7 +195,7 @@ class OpenAIProvider(Configurable, LanguageModelProvider, EmbeddingModelProvider
             num_retries=self._configuration.retries_per_request,
         )
-        self._create_completion = retry_handler(_create_completion)
+        self._create_chat_completion = retry_handler(_create_chat_completion)
         self._create_embedding = retry_handler(_create_embedding)

     def get_token_limit(self, model_name: str) -> int:
@@ -192,16 +206,22 @@ class OpenAIProvider(Configurable, LanguageModelProvider, EmbeddingModelProvider
         """Get the remaining budget."""
         return self._budget.remaining_budget

-    def count_tokens(self, text: str, model_name: OpenAIModelName) -> int:
-        encoding = tiktoken.encoding_for_model(model_name)
+    @classmethod
+    def get_tokenizer(cls, model_name: OpenAIModelName) -> ModelTokenizer:
+        return tiktoken.encoding_for_model(model_name)
+
+    @classmethod
+    def count_tokens(cls, text: str, model_name: OpenAIModelName) -> int:
+        encoding = cls.get_tokenizer(model_name)
         return len(encoding.encode(text))

+    @classmethod
     def count_message_tokens(
-        self,
-        messages: LanguageModelMessage | list[LanguageModelMessage],
+        cls,
+        messages: ChatMessage | list[ChatMessage],
         model_name: OpenAIModelName,
     ) -> int:
-        if isinstance(messages, LanguageModelMessage):
+        if isinstance(messages, ChatMessage):
             messages = [messages]

         if model_name.startswith("gpt-3.5-turbo"):
@@ -223,7 +243,7 @@ class OpenAIProvider(Configurable, LanguageModelProvider, EmbeddingModelProvider
         try:
             encoding = tiktoken.encoding_for_model(encoding_model)
         except KeyError:
-            self.logger.warn(
+            cls._logger.warn(
                 f"Model {model_name} not found. Defaulting to cl100k_base encoding."
             )
             encoding = tiktoken.get_encoding("cl100k_base")
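Because get_tokenizer and count_tokens are now classmethods, token counts no longer require a configured provider instance. A minimal sketch:

    from autogpt.core.resource.model_providers import OpenAIModelName, OpenAIProvider

    n_tokens = OpenAIProvider.count_tokens("Hello, world!", OpenAIModelName.GPT3)
    tokenizer = OpenAIProvider.get_tokenizer(OpenAIModelName.GPT3)
    assert tokenizer.decode(tokenizer.encode("Hello, world!")) == "Hello, world!"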
@@ -238,31 +258,31 @@ class OpenAIProvider(Configurable, LanguageModelProvider, EmbeddingModelProvider
             num_tokens += 3  # every reply is primed with <|start|>assistant<|message|>
         return num_tokens

-    async def create_language_completion(
+    async def create_chat_completion(
         self,
-        model_prompt: list[LanguageModelMessage],
+        model_prompt: list[ChatMessage],
         model_name: OpenAIModelName,
-        completion_parser: Callable[[dict], dict],
-        functions: list[LanguageModelFunction] = [],
+        completion_parser: Callable[[AssistantChatMessageDict], _T] = lambda _: None,
+        functions: list[CompletionModelFunction] = [],
         **kwargs,
-    ) -> LanguageModelResponse:
+    ) -> ChatModelResponse[_T]:
         """Create a completion using the OpenAI API."""
         completion_kwargs = self._get_completion_kwargs(model_name, functions, **kwargs)
-        response = await self._create_completion(
+        response = await self._create_chat_completion(
             messages=model_prompt,
             **completion_kwargs,
         )
         response_args = {
-            "model_info": OPEN_AI_LANGUAGE_MODELS[model_name],
+            "model_info": OPEN_AI_CHAT_MODELS[model_name],
             "prompt_tokens_used": response.usage.prompt_tokens,
             "completion_tokens_used": response.usage.completion_tokens,
         }

-        parsed_response = completion_parser(
-            response.choices[0].message.to_dict_recursive()
-        )
-        response = LanguageModelResponse(
-            content=parsed_response, **response_args
-        )
+        response_message = response.choices[0].message.to_dict_recursive()
+        response = ChatModelResponse(
+            response=response_message,
+            parsed_result=completion_parser(response_message),
+            **response_args,
+        )
         self._budget.update_usage_and_cost(response)
         return response
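With parsing moved inside the provider, call sites receive a typed ChatModelResponse whose parsed_result is whatever the parser returns. A hedged usage sketch, assuming an already-initialized provider:

    response = await provider.create_chat_completion(
        model_prompt=[ChatMessage.user("What is the capital of France?")],
        model_name=OpenAIModelName.GPT3,
        completion_parser=lambda message: message["content"].strip(),
    )
    response.parsed_result           # e.g. "Paris"; typed as str via the parser
    response.completion_tokens_used  # feeds the provider's budget tracking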
@@ -293,7 +313,7 @@ class OpenAIProvider(Configurable, LanguageModelProvider, EmbeddingModelProvider
     def _get_completion_kwargs(
         self,
         model_name: OpenAIModelName,
-        functions: list[LanguageModelFunction],
+        functions: list[CompletionModelFunction],
         **kwargs,
     ) -> dict:
         """Get kwargs for completion API call.
@@ -312,7 +332,7 @@ class OpenAIProvider(Configurable, LanguageModelProvider, EmbeddingModelProvider
             **self._credentials.unmasked(),
         }
         if functions:
-            completion_kwargs["functions"] = functions
+            completion_kwargs["functions"] = [f.schema for f in functions]

         return completion_kwargs
@@ -359,8 +379,8 @@ async def _create_embedding(text: str, *_, **kwargs) -> openai.Embedding:
     )


-async def _create_completion(
-    messages: list[LanguageModelMessage], *_, **kwargs
+async def _create_chat_completion(
+    messages: list[ChatMessage], *_, **kwargs
 ) -> openai.Completion:
     """Create a chat completion using the OpenAI API.
@@ -369,21 +389,17 @@ async def _create_completion(
     Returns:
         The completion.
     """
-    messages = [message.dict() for message in messages]
-    if "functions" in kwargs:
-        kwargs["functions"] = [function.json_schema for function in kwargs["functions"]]
+    raw_messages = [
+        message.dict(include={"role", "content", "function_call", "name"})
+        for message in messages
+    ]
     return await openai.ChatCompletion.acreate(
-        messages=messages,
+        messages=raw_messages,
         **kwargs,
     )

-_T = TypeVar("_T")
-_P = ParamSpec("_P")

 class _OpenAIRetryHandler:
     """Retry Handler for OpenAI API call.
View File
@@ -1,6 +1,15 @@
 import abc
 import enum
-from typing import Callable, ClassVar
+from typing import (
+    Callable,
+    ClassVar,
+    Generic,
+    Literal,
+    Optional,
+    Protocol,
+    TypedDict,
+    TypeVar,
+)

 from pydantic import BaseModel, Field, SecretStr, validator
@@ -13,33 +22,105 @@ from autogpt.core.resource.schema import (
     ProviderUsage,
     ResourceType,
 )
+from autogpt.core.utils.json_schema import JSONSchema


 class ModelProviderService(str, enum.Enum):
     """A ModelService describes what kind of service the model provides."""

     EMBEDDING = "embedding"
-    LANGUAGE = "language"
-    TEXT = "text"
+    CHAT = "chat_completion"
+    TEXT = "text_completion"


 class ModelProviderName(str, enum.Enum):
     OPENAI = "openai"


-class MessageRole(str, enum.Enum):
-    USER = "user"
-    SYSTEM = "system"
-    ASSISTANT = "assistant"
-
-
-class LanguageModelMessage(BaseModel):
-    role: MessageRole
-    content: str
+class ChatMessage(BaseModel):
+    class Role(str, enum.Enum):
+        USER = "user"
+        SYSTEM = "system"
+        ASSISTANT = "assistant"
+
+        FUNCTION = "function"
+        """May be used for the return value of function calls"""
+
+    role: Role
+    content: str
+
+    @staticmethod
+    def assistant(content: str) -> "ChatMessage":
+        return ChatMessage(role=ChatMessage.Role.ASSISTANT, content=content)
+
+    @staticmethod
+    def user(content: str) -> "ChatMessage":
+        return ChatMessage(role=ChatMessage.Role.USER, content=content)
+
+    @staticmethod
+    def system(content: str) -> "ChatMessage":
+        return ChatMessage(role=ChatMessage.Role.SYSTEM, content=content)
+
+
+class ChatMessageDict(TypedDict):
+    role: str
+    content: str


-class LanguageModelFunction(BaseModel):
-    json_schema: dict
+class AssistantFunctionCall(BaseModel):
+    name: str
+    arguments: str
+
+
+class AssistantFunctionCallDict(TypedDict):
+    name: str
+    arguments: str
+
+
+class AssistantChatMessage(ChatMessage):
+    role: Literal["assistant"]
+    content: Optional[str]
+    function_call: Optional[AssistantFunctionCall]
+
+
+class AssistantChatMessageDict(TypedDict, total=False):
+    role: str
+    content: str
+    function_call: AssistantFunctionCallDict
+
+
+class CompletionModelFunction(BaseModel):
+    """General representation object for LLM-callable functions."""
+
+    name: str
+    description: str
+    parameters: dict[str, "JSONSchema"]
+
+    @property
+    def schema(self) -> dict[str, str | dict | list]:
+        """Returns an OpenAI-consumable function specification"""
+        return {
+            "name": self.name,
+            "description": self.description,
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    name: param.dump() for name, param in self.parameters.items()
+                },
+                "required": [
+                    name for name, param in self.parameters.items() if param.required
+                ],
+            },
+        }
+
+    @staticmethod
+    def parse(schema: dict) -> "CompletionModelFunction":
+        return CompletionModelFunction(
+            name=schema["name"],
+            description=schema["description"],
+            parameters=JSONSchema.parse_properties(schema["parameters"]),
+        )
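To make the new shapes concrete, here is a sketch of declaring an LLM-callable function with JSONSchema parameters and emitting the OpenAI-consumable payload via the schema property; the weather lookup is a hypothetical example:

    get_weather = CompletionModelFunction(
        name="get_weather",
        description="Look up the current weather in a city.",
        parameters={
            "city": JSONSchema(type=JSONSchema.Type.STRING, required=True),
            "unit": JSONSchema(type=JSONSchema.Type.STRING, enum=["celsius", "fahrenheit"]),
        },
    )
    get_weather.schema["parameters"]["required"]  # ["city"]; "unit" stays optional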
class ModelInfo(BaseModel): class ModelInfo(BaseModel):
@ -47,7 +128,6 @@ class ModelInfo(BaseModel):
Would be lovely to eventually get this directly from APIs, but needs to be Would be lovely to eventually get this directly from APIs, but needs to be
scraped from websites for now. scraped from websites for now.
""" """
name: str name: str
@@ -123,12 +203,12 @@ class ModelProviderBudget(ProviderBudget):
         """Update the usage and cost of the provider."""
         model_info = model_response.model_info
         self.usage.update_usage(model_response)
-        incremental_cost = (
+        incurred_cost = (
             model_response.completion_tokens_used * model_info.completion_token_cost
             + model_response.prompt_tokens_used * model_info.prompt_token_cost
         )
-        self.total_cost += incremental_cost
-        self.remaining_budget -= incremental_cost
+        self.total_cost += incurred_cost
+        self.remaining_budget -= incurred_cost


 class ModelProviderSettings(ProviderSettings):
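Worked numbers for update_usage_and_cost, using the GPT-4 rates declared in OPEN_AI_CHAT_MODELS above: a call that consumes 1000 prompt tokens and 500 completion tokens incurs 1000 * (0.03 / 1000) + 500 * (0.06 / 1000) = 0.03 + 0.03 = 0.06 dollars, which is added to total_cost and subtracted from remaining_budget.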
@@ -140,12 +220,16 @@ class ModelProviderSettings(ProviderSettings):
 class ModelProvider(abc.ABC):
     """A ModelProvider abstracts the details of a particular provider of models."""

-    defaults: ClassVar[ModelProviderSettings]
+    default_settings: ClassVar[ModelProviderSettings]

     @abc.abstractmethod
     def count_tokens(self, text: str, model_name: str) -> int:
         ...

+    @abc.abstractmethod
+    def get_tokenizer(self, model_name: str) -> "ModelTokenizer":
+        ...
+
     @abc.abstractmethod
     def get_token_limit(self, model_name: str) -> int:
         ...
@@ -155,6 +239,18 @@ class ModelProvider(abc.ABC):
         ...

+class ModelTokenizer(Protocol):
+    """A ModelTokenizer provides tokenization specific to a model."""
+
+    @abc.abstractmethod
+    def encode(self, text: str) -> list:
+        ...
+
+    @abc.abstractmethod
+    def decode(self, tokens: list) -> str:
+        ...
+
 ####################
 # Embedding Models #
 ####################
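Since ModelTokenizer is a Protocol, any object exposing matching encode/decode methods satisfies it structurally; tiktoken's Encoding qualifies without inheriting from it. A toy sketch:

    class WhitespaceTokenizer:
        # Satisfies the ModelTokenizer protocol structurally; no inheritance required.
        def encode(self, text: str) -> list:
            return text.split()

        def decode(self, tokens: list) -> str:
            return " ".join(tokens)

    tokenizer: ModelTokenizer = WhitespaceTokenizer()  # accepted by a static type checker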
@@ -193,40 +289,45 @@ class EmbeddingModelProvider(ModelProvider):
         ...

-###################
-# Language Models #
-###################
+###############
+# Chat Models #
+###############


-class LanguageModelInfo(ModelInfo):
+class ChatModelInfo(ModelInfo):
     """Struct for language model information."""

-    llm_service = ModelProviderService.LANGUAGE
+    llm_service = ModelProviderService.CHAT
     max_tokens: int
+    has_function_call_api: bool = False


+_T = TypeVar("_T")
+
-class LanguageModelResponse(ModelResponse):
+class ChatModelResponse(ModelResponse, Generic[_T]):
     """Standard response struct for a response from a language model."""

-    content: dict = None
+    response: AssistantChatMessageDict
+    parsed_result: _T = None


-class LanguageModelProvider(ModelProvider):
+class ChatModelProvider(ModelProvider):
     @abc.abstractmethod
     def count_message_tokens(
         self,
-        messages: LanguageModelMessage | list[LanguageModelMessage],
+        messages: ChatMessage | list[ChatMessage],
         model_name: str,
     ) -> int:
         ...

     @abc.abstractmethod
-    async def create_language_completion(
+    async def create_chat_completion(
         self,
-        model_prompt: list[LanguageModelMessage],
-        functions: list[LanguageModelFunction],
+        model_prompt: list[ChatMessage],
         model_name: str,
-        completion_parser: Callable[[dict], dict],
+        completion_parser: Callable[[AssistantChatMessageDict], _T] = lambda _: None,
+        functions: list[CompletionModelFunction] = [],
         **kwargs,
-    ) -> LanguageModelResponse:
+    ) -> ChatModelResponse[_T]:
         ...
View File
@@ -35,7 +35,7 @@ autogpt.add_command(make_settings)
 async def run(settings_file: str, pdb: bool) -> None:
     """Run the Auto-GPT agent."""
     click.echo("Running Auto-GPT agent...")
-    settings_file = Path(settings_file)
+    settings_file: Path = Path(settings_file)
     settings = {}
     if settings_file.exists():
         settings = yaml.safe_load(settings_file.read_text())
View File
@@ -2,7 +2,8 @@ import click
 from autogpt.core.agent import AgentSettings, SimpleAgent
 from autogpt.core.runner.client_lib.logging import (
-    configure_root_logger, get_client_logger
+    configure_root_logger,
+    get_client_logger,
 )
 from autogpt.core.runner.client_lib.parser import (
     parse_ability_result,
View File
@@ -1,6 +1,6 @@
 import logging

-from .config import configure_root_logger, FancyConsoleFormatter, BelowLevelFilter
+from .config import BelowLevelFilter, FancyConsoleFormatter, configure_root_logger
 from .helpers import dump_prompt
View File
@@ -2,12 +2,12 @@ from math import ceil, floor
 from typing import TYPE_CHECKING

 if TYPE_CHECKING:
-    from autogpt.core.planning import LanguageModelPrompt
+    from autogpt.core.prompting import ChatPrompt

 SEPARATOR_LENGTH = 42


-def dump_prompt(prompt: "LanguageModelPrompt") -> str:
+def dump_prompt(prompt: "ChatPrompt") -> str:
     def separator(text: str):
         half_sep_len = (SEPARATOR_LENGTH - 2 - len(text)) / 2
         return f"{floor(half_sep_len)*'-'} {text.upper()} {ceil(half_sep_len)*'-'}"
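Worked example of the separator arithmetic: for SEPARATOR_LENGTH = 42 and text "system", half_sep_len = (42 - 2 - 6) / 2 = 17, so separator("system") yields 17 dashes, " SYSTEM ", then 17 more dashes: exactly 42 characters. For odd-length labels, ceil places the extra dash on the right.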
View File
@@ -0,0 +1,116 @@
import enum
import json
from logging import Logger
from typing import Literal, Optional

from jsonschema import Draft7Validator
from pydantic import BaseModel


class JSONSchema(BaseModel):
    class Type(str, enum.Enum):
        STRING = "string"
        ARRAY = "array"
        OBJECT = "object"
        NUMBER = "number"
        INTEGER = "integer"
        BOOLEAN = "boolean"

    # TODO: add docstrings
    description: Optional[str] = None
    type: Optional[Type] = None
    enum: Optional[list] = None
    required: bool = False
    items: Optional["JSONSchema"] = None
    properties: Optional[dict[str, "JSONSchema"]] = None
    minimum: Optional[int | float] = None
    maximum: Optional[int | float] = None
    minItems: Optional[int] = None
    maxItems: Optional[int] = None

    def dump(self) -> dict:
        """Serializes this JSONSchema to a plain dict, omitting unset fields."""
        schema: dict = {
            "type": self.type.value if self.type else None,
            "description": self.description,
        }
        if self.type == "array":
            if self.items:
                schema["items"] = self.items.dump()
            schema["minItems"] = self.minItems
            schema["maxItems"] = self.maxItems
        elif self.type == "object":
            if self.properties:
                schema["properties"] = {
                    name: prop.dump() for name, prop in self.properties.items()
                }
                schema["required"] = [
                    name for name, prop in self.properties.items() if prop.required
                ]
        elif self.enum:
            schema["enum"] = self.enum
        else:
            schema["minimum"] = self.minimum
            schema["maximum"] = self.maximum

        schema = {k: v for k, v in schema.items() if v is not None}

        return schema

    @staticmethod
    def from_dict(schema: dict) -> "JSONSchema":
        return JSONSchema(
            description=schema.get("description"),
            type=schema["type"],
            enum=schema["enum"] if "enum" in schema else None,
            items=JSONSchema.from_dict(schema["items"]) if "items" in schema else None,
            properties=JSONSchema.parse_properties(schema)
            if schema["type"] == "object"
            else None,
            minimum=schema.get("minimum"),
            maximum=schema.get("maximum"),
            minItems=schema.get("minItems"),
            maxItems=schema.get("maxItems"),
        )

    @staticmethod
    def parse_properties(schema_node: dict) -> dict[str, "JSONSchema"]:
        properties = (
            {k: JSONSchema.from_dict(v) for k, v in schema_node["properties"].items()}
            if "properties" in schema_node
            else {}
        )
        if "required" in schema_node:
            for k, v in properties.items():
                v.required = k in schema_node["required"]
        return properties

    def validate_object(
        self, object: object, logger: Logger
    ) -> tuple[Literal[True], None] | tuple[Literal[False], list]:
        """
        Validates a dictionary object against this JSONSchema.

        Params:
            object: The dictionary object to validate.

        Returns:
            tuple: A tuple where the first element is a boolean indicating whether
                the object is valid, and the second element is a list of errors found
                in the object, or None if the object is valid.
        """
        validator = Draft7Validator(self.dump())

        if errors := sorted(validator.iter_errors(object), key=lambda e: e.path):
            for error in errors:
                logger.debug(f"JSON Validation Error: {error}")

            logger.error(json.dumps(object, indent=4))
            logger.error("The following issues were found:")

            for error in errors:
                logger.error(f"Error: {error.message}")
            return False, errors

        logger.debug("The JSON object is valid.")

        return True, None
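A minimal end-to-end sketch of the new utility class: compose a schema, then validate candidate objects against it.

    import logging

    person = JSONSchema(
        type=JSONSchema.Type.OBJECT,
        properties={
            "name": JSONSchema(type=JSONSchema.Type.STRING, required=True),
            "age": JSONSchema(type=JSONSchema.Type.INTEGER, minimum=0),
        },
    )

    logger = logging.getLogger(__name__)
    person.validate_object({"name": "Ada", "age": 36}, logger)  # -> (True, None)
    person.validate_object({"age": -1}, logger)  # -> (False, [...]): "name" is missing, "age" is below the minimum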