diff --git a/autogpt_platform/backend/.env.example b/autogpt_platform/backend/.env.example index 5c1b872b1..eb4801c28 100644 --- a/autogpt_platform/backend/.env.example +++ b/autogpt_platform/backend/.env.example @@ -75,6 +75,12 @@ GOOGLE_CLIENT_SECRET= TWITTER_CLIENT_ID= TWITTER_CLIENT_SECRET= +# Linear App +# Make a new workspace for your OAuth APP -- trust me +# https://linear.app/settings/api/applications/new +# Callback URL: http://localhost:3000/auth/integrations/oauth_callback +LINEAR_CLIENT_ID= +LINEAR_CLIENT_SECRET= ## ===== OPTIONAL API KEYS ===== ## diff --git a/autogpt_platform/backend/backend/blocks/basic.py b/autogpt_platform/backend/backend/blocks/basic.py index b68c04bad..d7dd468c4 100644 --- a/autogpt_platform/backend/backend/blocks/basic.py +++ b/autogpt_platform/backend/backend/blocks/basic.py @@ -1,9 +1,11 @@ +import enum from typing import Any, List from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema, BlockType from backend.data.model import SchemaField from backend.util.mock import MockObject from backend.util.text import TextFormatter +from backend.util.type import convert formatter = TextFormatter() @@ -590,3 +592,47 @@ class CreateListBlock(Block): yield "list", input_data.values except Exception as e: yield "error", f"Failed to create list: {str(e)}" + + +class TypeOptions(enum.Enum): + STRING = "string" + NUMBER = "number" + BOOLEAN = "boolean" + LIST = "list" + DICTIONARY = "dictionary" + + +class UniversalTypeConverterBlock(Block): + class Input(BlockSchema): + value: Any = SchemaField( + description="The value to convert to a universal type." 
+ ) + type: TypeOptions = SchemaField(description="The type to convert the value to.") + + class Output(BlockSchema): + value: Any = SchemaField(description="The converted value.") + + def __init__(self): + super().__init__( + id="95d1b990-ce13-4d88-9737-ba5c2070c97b", + description="This block is used to convert a value to a universal type.", + categories={BlockCategory.BASIC}, + input_schema=UniversalTypeConverterBlock.Input, + output_schema=UniversalTypeConverterBlock.Output, + ) + + def run(self, input_data: Input, **kwargs) -> BlockOutput: + try: + converted_value = convert( + input_data.value, + { + TypeOptions.STRING: str, + TypeOptions.NUMBER: float, + TypeOptions.BOOLEAN: bool, + TypeOptions.LIST: list, + TypeOptions.DICTIONARY: dict, + }[input_data.type], + ) + yield "value", converted_value + except Exception as e: + yield "error", f"Failed to convert value: {str(e)}" diff --git a/autogpt_platform/backend/backend/blocks/linear/_api.py b/autogpt_platform/backend/backend/blocks/linear/_api.py new file mode 100644 index 000000000..4639975e8 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/linear/_api.py @@ -0,0 +1,272 @@ +from __future__ import annotations + +import json +from typing import Any, Dict, Optional + +from backend.blocks.linear._auth import LinearCredentials +from backend.blocks.linear.models import ( + CreateCommentResponse, + CreateIssueResponse, + Issue, + Project, +) +from backend.util.request import Requests + + +class LinearAPIException(Exception): + def __init__(self, message: str, status_code: int): + super().__init__(message) + self.status_code = status_code + + +class LinearClient: + """Client for the Linear API + + If you're looking for the schema: https://studio.apollographql.com/public/Linear-API/variant/current/schema + """ + + API_URL = "https://api.linear.app/graphql" + + def __init__( + self, + credentials: LinearCredentials | None = None, + custom_requests: Optional[Requests] = None, + ): + if custom_requests: + 
self._requests = custom_requests + else: + + headers: Dict[str, str] = { + "Content-Type": "application/json", + } + if credentials: + headers["Authorization"] = credentials.bearer() + + self._requests = Requests( + extra_headers=headers, + trusted_origins=["https://api.linear.app"], + raise_for_status=False, + ) + + def _execute_graphql_request( + self, query: str, variables: dict | None = None + ) -> Any: + """ + Executes a GraphQL request against the Linear API and returns the response data. + + Args: + query: The GraphQL query string. + variables (optional): Any GraphQL query variables + + Returns: + The parsed JSON response data, or raises a LinearAPIException on error. + """ + payload: Dict[str, Any] = {"query": query} + if variables: + payload["variables"] = variables + + response = self._requests.post(self.API_URL, json=payload) + + if not response.ok: + + try: + error_data = response.json() + error_message = error_data.get("errors", [{}])[0].get("message", "") + except json.JSONDecodeError: + error_message = response.text + + raise LinearAPIException( + f"Linear API request failed ({response.status_code}): {error_message}", + response.status_code, + ) + + response_data = response.json() + if "errors" in response_data: + + error_messages = [ + error.get("message", "") for error in response_data["errors"] + ] + raise LinearAPIException( + f"Linear API returned errors: {', '.join(error_messages)}", + response.status_code, + ) + + return response_data["data"] + + def query(self, query: str, variables: Optional[dict] = None) -> dict: + """Executes a GraphQL query. + + Args: + query: The GraphQL query string. + variables: Query variables, if any. + + Returns: + The response data. + """ + return self._execute_graphql_request(query, variables) + + def mutate(self, mutation: str, variables: Optional[dict] = None) -> dict: + """Executes a GraphQL mutation. + + Args: + mutation: The GraphQL mutation string. + variables: Query variables, if any. 
+ + Returns: + The response data. + """ + return self._execute_graphql_request(mutation, variables) + + def try_create_comment(self, issue_id: str, comment: str) -> CreateCommentResponse: + try: + mutation = """ + mutation CommentCreate($input: CommentCreateInput!) { + commentCreate(input: $input) { + success + comment { + id + body + } + } + } + """ + + variables = { + "input": { + "body": comment, + "issueId": issue_id, + } + } + + added_comment = self.mutate(mutation, variables) + # Select the commentCreate field from the mutation response + return CreateCommentResponse(**added_comment["commentCreate"]) + except LinearAPIException as e: + raise e + + def try_get_team_by_name(self, team_name: str) -> str: + try: + query = """ + query GetTeamId($searchTerm: String!) { + teams(filter: { + or: [ + { name: { eqIgnoreCase: $searchTerm } }, + { key: { eqIgnoreCase: $searchTerm } } + ] + }) { + nodes { + id + name + key + } + } + } + """ + + variables: dict[str, Any] = { + "searchTerm": team_name, + } + + team_id = self.query(query, variables) + return team_id["teams"]["nodes"][0]["id"] + except LinearAPIException as e: + raise e + + def try_create_issue( + self, + team_id: str, + title: str, + description: str | None = None, + priority: int | None = None, + project_id: str | None = None, + ) -> CreateIssueResponse: + try: + mutation = """ + mutation IssueCreate($input: IssueCreateInput!) 
{ + issueCreate(input: $input) { + issue { + title + description + id + identifier + priority + } + } + } + """ + + variables: dict[str, Any] = { + "input": { + "teamId": team_id, + "title": title, + } + } + + if project_id: + variables["input"]["projectId"] = project_id + + if description: + variables["input"]["description"] = description + + if priority: + variables["input"]["priority"] = priority + + added_issue = self.mutate(mutation, variables) + return CreateIssueResponse(**added_issue["issueCreate"]) + except LinearAPIException as e: + raise e + + def try_search_projects(self, term: str) -> list[Project]: + try: + query = """ + query SearchProjects($term: String!, $includeComments: Boolean!) { + searchProjects(term: $term, includeComments: $includeComments) { + nodes { + id + name + description + priority + progress + content + } + } + } + """ + + variables: dict[str, Any] = { + "term": term, + "includeComments": True, + } + + projects = self.query(query, variables) + return [ + Project(**project) for project in projects["searchProjects"]["nodes"] + ] + except LinearAPIException as e: + raise e + + def try_search_issues(self, term: str) -> list[Issue]: + try: + query = """ + query SearchIssues($term: String!, $includeComments: Boolean!) 
{ + searchIssues(term: $term, includeComments: $includeComments) { + nodes { + id + identifier + title + description + priority + } + } + } + """ + + variables: dict[str, Any] = { + "term": term, + "includeComments": True, + } + + issues = self.query(query, variables) + return [Issue(**issue) for issue in issues["searchIssues"]["nodes"]] + except LinearAPIException as e: + raise e diff --git a/autogpt_platform/backend/backend/blocks/linear/_auth.py b/autogpt_platform/backend/backend/blocks/linear/_auth.py new file mode 100644 index 000000000..fb91fbfe7 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/linear/_auth.py @@ -0,0 +1,101 @@ +from enum import Enum +from typing import Literal + +from pydantic import SecretStr + +from backend.data.model import ( + APIKeyCredentials, + CredentialsField, + CredentialsMetaInput, + OAuth2Credentials, +) +from backend.integrations.providers import ProviderName +from backend.util.settings import Secrets + +secrets = Secrets() +LINEAR_OAUTH_IS_CONFIGURED = bool( + secrets.linear_client_id and secrets.linear_client_secret +) + +LinearCredentials = OAuth2Credentials | APIKeyCredentials +# LinearCredentialsInput = CredentialsMetaInput[ +# Literal[ProviderName.LINEAR], +# Literal["oauth2", "api_key"] if LINEAR_OAUTH_IS_CONFIGURED else Literal["oauth2"], +# ] +LinearCredentialsInput = CredentialsMetaInput[ + Literal[ProviderName.LINEAR], Literal["oauth2"] +] + + +# (required) Comma separated list of scopes: + +# read - (Default) Read access for the user's account. This scope will always be present. + +# write - Write access for the user's account. If your application only needs to create comments, use a more targeted scope + +# issues:create - Allows creating new issues and their attachments + +# comments:create - Allows creating new issue comments + +# timeSchedule:write - Allows creating and modifying time schedules + + +# admin - Full access to admin level endpoints. 
You should never ask for this permission unless it's absolutely needed +class LinearScope(str, Enum): + READ = "read" + WRITE = "write" + ISSUES_CREATE = "issues:create" + COMMENTS_CREATE = "comments:create" + TIME_SCHEDULE_WRITE = "timeSchedule:write" + ADMIN = "admin" + + +def LinearCredentialsField(scopes: list[LinearScope]) -> LinearCredentialsInput: + """ + Creates a Linear credentials input on a block. + + Params: + scope: The authorization scope needed for the block to work. ([list of available scopes](https://docs.github.com/en/apps/oauth-apps/building-oauth-apps/scopes-for-oauth-apps#available-scopes)) + """ # noqa + return CredentialsField( + required_scopes=set([LinearScope.READ.value]).union( + set([scope.value for scope in scopes]) + ), + description="The Linear integration can be used with OAuth, " + "or any API key with sufficient permissions for the blocks it is used on.", + ) + + +TEST_CREDENTIALS_OAUTH = OAuth2Credentials( + id="01234567-89ab-cdef-0123-456789abcdef", + provider="linear", + title="Mock Linear API key", + username="mock-linear-username", + access_token=SecretStr("mock-linear-access-token"), + access_token_expires_at=None, + refresh_token=SecretStr("mock-linear-refresh-token"), + refresh_token_expires_at=None, + scopes=["mock-linear-scopes"], +) + +TEST_CREDENTIALS_API_KEY = APIKeyCredentials( + id="01234567-89ab-cdef-0123-456789abcdef", + provider="linear", + title="Mock Linear API key", + api_key=SecretStr("mock-linear-api-key"), + expires_at=None, +) + +TEST_CREDENTIALS_INPUT_OAUTH = { + "provider": TEST_CREDENTIALS_OAUTH.provider, + "id": TEST_CREDENTIALS_OAUTH.id, + "type": TEST_CREDENTIALS_OAUTH.type, + "title": TEST_CREDENTIALS_OAUTH.type, +} + +TEST_CREDENTIALS_INPUT_API_KEY = { + "provider": TEST_CREDENTIALS_API_KEY.provider, + "id": TEST_CREDENTIALS_API_KEY.id, + "type": TEST_CREDENTIALS_API_KEY.type, + "title": TEST_CREDENTIALS_API_KEY.type, +} diff --git a/autogpt_platform/backend/backend/blocks/linear/comment.py 
b/autogpt_platform/backend/backend/blocks/linear/comment.py new file mode 100644 index 000000000..ea2ff1e61 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/linear/comment.py @@ -0,0 +1,81 @@ +from backend.blocks.linear._api import LinearAPIException, LinearClient +from backend.blocks.linear._auth import ( + TEST_CREDENTIALS_INPUT_OAUTH, + TEST_CREDENTIALS_OAUTH, + LinearCredentials, + LinearCredentialsField, + LinearCredentialsInput, + LinearScope, +) +from backend.blocks.linear.models import CreateCommentResponse +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + + +class LinearCreateCommentBlock(Block): + """Block for creating comments on Linear issues""" + + class Input(BlockSchema): + credentials: LinearCredentialsInput = LinearCredentialsField( + scopes=[LinearScope.COMMENTS_CREATE], + ) + issue_id: str = SchemaField(description="ID of the issue to comment on") + comment: str = SchemaField(description="Comment text to add to the issue") + + class Output(BlockSchema): + comment_id: str = SchemaField(description="ID of the created comment") + comment_body: str = SchemaField( + description="Text content of the created comment" + ) + error: str = SchemaField(description="Error message if comment creation failed") + + def __init__(self): + super().__init__( + id="8f7d3a2e-9b5c-4c6a-8f1d-7c8b3e4a5d6c", + description="Creates a new comment on a Linear issue", + input_schema=self.Input, + output_schema=self.Output, + categories={BlockCategory.PRODUCTIVITY, BlockCategory.ISSUE_TRACKING}, + test_input={ + "issue_id": "TEST-123", + "comment": "Test comment", + "credentials": TEST_CREDENTIALS_INPUT_OAUTH, + }, + test_credentials=TEST_CREDENTIALS_OAUTH, + test_output=[("comment_id", "abc123"), ("comment_body", "Test comment")], + test_mock={ + "create_comment": lambda *args, **kwargs: ( + "abc123", + "Test comment", + ) + }, + ) + + @staticmethod + def create_comment( + credentials: 
LinearCredentials, issue_id: str, comment: str + ) -> tuple[str, str]: + client = LinearClient(credentials=credentials) + response: CreateCommentResponse = client.try_create_comment( + issue_id=issue_id, comment=comment + ) + return response.comment.id, response.comment.body + + def run( + self, input_data: Input, *, credentials: LinearCredentials, **kwargs + ) -> BlockOutput: + """Execute the comment creation""" + try: + comment_id, comment_body = self.create_comment( + credentials=credentials, + issue_id=input_data.issue_id, + comment=input_data.comment, + ) + + yield "comment_id", comment_id + yield "comment_body", comment_body + + except LinearAPIException as e: + yield "error", str(e) + except Exception as e: + yield "error", f"Unexpected error: {str(e)}" diff --git a/autogpt_platform/backend/backend/blocks/linear/issues.py b/autogpt_platform/backend/backend/blocks/linear/issues.py new file mode 100644 index 000000000..4c2c3ffa0 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/linear/issues.py @@ -0,0 +1,186 @@ +from backend.blocks.linear._api import LinearAPIException, LinearClient +from backend.blocks.linear._auth import ( + TEST_CREDENTIALS_INPUT_OAUTH, + TEST_CREDENTIALS_OAUTH, + LinearCredentials, + LinearCredentialsField, + LinearCredentialsInput, + LinearScope, +) +from backend.blocks.linear.models import CreateIssueResponse, Issue +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + + +class LinearCreateIssueBlock(Block): + """Block for creating issues on Linear""" + + class Input(BlockSchema): + credentials: LinearCredentialsInput = LinearCredentialsField( + scopes=[LinearScope.ISSUES_CREATE], + ) + title: str = SchemaField(description="Title of the issue") + description: str | None = SchemaField(description="Description of the issue") + team_name: str = SchemaField( + description="Name of the team to create the issue on" + ) + priority: int | None = SchemaField( + 
description="Priority of the issue", + default=None, + minimum=0, + maximum=4, + ) + project_name: str | None = SchemaField( + description="Name of the project to create the issue on", + default=None, + ) + + class Output(BlockSchema): + issue_id: str = SchemaField(description="ID of the created issue") + issue_title: str = SchemaField(description="Title of the created issue") + error: str = SchemaField(description="Error message if issue creation failed") + + def __init__(self): + super().__init__( + id="f9c68f55-dcca-40a8-8771-abf9601680aa", + description="Creates a new issue on Linear", + input_schema=self.Input, + output_schema=self.Output, + categories={BlockCategory.PRODUCTIVITY, BlockCategory.ISSUE_TRACKING}, + test_input={ + "title": "Test issue", + "description": "Test description", + "team_name": "Test team", + "project_name": "Test project", + "credentials": TEST_CREDENTIALS_INPUT_OAUTH, + }, + test_credentials=TEST_CREDENTIALS_OAUTH, + test_output=[("issue_id", "abc123"), ("issue_title", "Test issue")], + test_mock={ + "create_issue": lambda *args, **kwargs: ( + "abc123", + "Test issue", + ) + }, + ) + + @staticmethod + def create_issue( + credentials: LinearCredentials, + team_name: str, + title: str, + description: str | None = None, + priority: int | None = None, + project_name: str | None = None, + ) -> tuple[str, str]: + client = LinearClient(credentials=credentials) + team_id = client.try_get_team_by_name(team_name=team_name) + project_id: str | None = None + if project_name: + projects = client.try_search_projects(term=project_name) + if projects: + project_id = projects[0].id + else: + raise LinearAPIException("Project not found", status_code=404) + response: CreateIssueResponse = client.try_create_issue( + team_id=team_id, + title=title, + description=description, + priority=priority, + project_id=project_id, + ) + return response.issue.identifier, response.issue.title + + def run( + self, input_data: Input, *, credentials: LinearCredentials, 
**kwargs + ) -> BlockOutput: + """Execute the issue creation""" + try: + issue_id, issue_title = self.create_issue( + credentials=credentials, + team_name=input_data.team_name, + title=input_data.title, + description=input_data.description, + priority=input_data.priority, + project_name=input_data.project_name, + ) + + yield "issue_id", issue_id + yield "issue_title", issue_title + + except LinearAPIException as e: + yield "error", str(e) + except Exception as e: + yield "error", f"Unexpected error: {str(e)}" + + +class LinearSearchIssuesBlock(Block): + """Block for searching issues on Linear""" + + class Input(BlockSchema): + term: str = SchemaField(description="Term to search for issues") + credentials: LinearCredentialsInput = LinearCredentialsField( + scopes=[LinearScope.READ], + ) + + class Output(BlockSchema): + issues: list[Issue] = SchemaField(description="List of issues") + + def __init__(self): + super().__init__( + id="b5a2a0e6-26b4-4c5b-8a42-bc79e9cb65c2", + description="Searches for issues on Linear", + input_schema=self.Input, + output_schema=self.Output, + test_input={ + "term": "Test issue", + "credentials": TEST_CREDENTIALS_INPUT_OAUTH, + }, + test_credentials=TEST_CREDENTIALS_OAUTH, + test_output=[ + ( + "issues", + [ + Issue( + id="abc123", + identifier="abc123", + title="Test issue", + description="Test description", + priority=1, + ) + ], + ) + ], + test_mock={ + "search_issues": lambda *args, **kwargs: [ + Issue( + id="abc123", + identifier="abc123", + title="Test issue", + description="Test description", + priority=1, + ) + ] + }, + ) + + @staticmethod + def search_issues( + credentials: LinearCredentials, + term: str, + ) -> list[Issue]: + client = LinearClient(credentials=credentials) + response: list[Issue] = client.try_search_issues(term=term) + return response + + def run( + self, input_data: Input, *, credentials: LinearCredentials, **kwargs + ) -> BlockOutput: + """Execute the issue search""" + try: + issues = 
self.search_issues(credentials=credentials, term=input_data.term) + yield "issues", issues + except LinearAPIException as e: + yield "error", str(e) + except Exception as e: + yield "error", f"Unexpected error: {str(e)}" diff --git a/autogpt_platform/backend/backend/blocks/linear/models.py b/autogpt_platform/backend/backend/blocks/linear/models.py new file mode 100644 index 000000000..a6a2de3cd --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/linear/models.py @@ -0,0 +1,41 @@ +from pydantic import BaseModel + + +class Comment(BaseModel): + id: str + body: str + + +class CreateCommentInput(BaseModel): + body: str + issueId: str + + +class CreateCommentResponse(BaseModel): + success: bool + comment: Comment + + +class CreateCommentResponseWrapper(BaseModel): + commentCreate: CreateCommentResponse + + +class Issue(BaseModel): + id: str + identifier: str + title: str + description: str | None + priority: int + + +class CreateIssueResponse(BaseModel): + issue: Issue + + +class Project(BaseModel): + id: str + name: str + description: str + priority: int + progress: int + content: str diff --git a/autogpt_platform/backend/backend/blocks/linear/projects.py b/autogpt_platform/backend/backend/blocks/linear/projects.py new file mode 100644 index 000000000..9d33b3f77 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/linear/projects.py @@ -0,0 +1,93 @@ +from backend.blocks.linear._api import LinearAPIException, LinearClient +from backend.blocks.linear._auth import ( + TEST_CREDENTIALS_INPUT_OAUTH, + TEST_CREDENTIALS_OAUTH, + LinearCredentials, + LinearCredentialsField, + LinearCredentialsInput, + LinearScope, +) +from backend.blocks.linear.models import Project +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + + +class LinearSearchProjectsBlock(Block): + """Block for searching projects on Linear""" + + class Input(BlockSchema): + credentials: LinearCredentialsInput = 
LinearCredentialsField( + scopes=[LinearScope.READ], + ) + term: str = SchemaField(description="Term to search for projects") + + class Output(BlockSchema): + projects: list[Project] = SchemaField(description="List of projects") + error: str = SchemaField(description="Error message if issue creation failed") + + def __init__(self): + super().__init__( + id="446a1d35-9d8f-4ac5-83ea-7684ec50e6af", + description="Searches for projects on Linear", + input_schema=self.Input, + output_schema=self.Output, + categories={BlockCategory.PRODUCTIVITY, BlockCategory.ISSUE_TRACKING}, + test_input={ + "term": "Test project", + "credentials": TEST_CREDENTIALS_INPUT_OAUTH, + }, + test_credentials=TEST_CREDENTIALS_OAUTH, + test_output=[ + ( + "projects", + [ + Project( + id="abc123", + name="Test project", + description="Test description", + priority=1, + progress=1, + content="Test content", + ) + ], + ) + ], + test_mock={ + "search_projects": lambda *args, **kwargs: [ + Project( + id="abc123", + name="Test project", + description="Test description", + priority=1, + progress=1, + content="Test content", + ) + ] + }, + ) + + @staticmethod + def search_projects( + credentials: LinearCredentials, + term: str, + ) -> list[Project]: + client = LinearClient(credentials=credentials) + response: list[Project] = client.try_search_projects(term=term) + return response + + def run( + self, input_data: Input, *, credentials: LinearCredentials, **kwargs + ) -> BlockOutput: + """Execute the project search""" + try: + projects = self.search_projects( + credentials=credentials, + term=input_data.term, + ) + + yield "projects", projects + + except LinearAPIException as e: + yield "error", str(e) + except Exception as e: + yield "error", f"Unexpected error: {str(e)}" diff --git a/autogpt_platform/backend/backend/blocks/linear/triggers.py b/autogpt_platform/backend/backend/blocks/linear/triggers.py new file mode 100644 index 000000000..e69de29bb diff --git 
a/autogpt_platform/backend/backend/data/block.py b/autogpt_platform/backend/backend/data/block.py index add06dbb1..effdb2c5d 100644 --- a/autogpt_platform/backend/backend/data/block.py +++ b/autogpt_platform/backend/backend/data/block.py @@ -64,6 +64,8 @@ class BlockCategory(Enum): SAFETY = ( "Block that provides AI safety mechanisms such as detecting harmful content" ) + PRODUCTIVITY = "Block that helps with productivity" + ISSUE_TRACKING = "Block that helps with issue tracking" def dict(self) -> dict[str, str]: return {"category": self.name, "description": self.value} diff --git a/autogpt_platform/backend/backend/data/credit.py b/autogpt_platform/backend/backend/data/credit.py index c408a7cf0..fe76b9c21 100644 --- a/autogpt_platform/backend/backend/data/credit.py +++ b/autogpt_platform/backend/backend/data/credit.py @@ -12,6 +12,7 @@ from backend.data import db from backend.data.block import Block, BlockInput, get_block from backend.data.block_cost_config import BLOCK_COSTS from backend.data.cost import BlockCost, BlockCostType +from backend.data.execution import NodeExecutionEntry from backend.data.user import get_user_by_id from backend.util.settings import Settings @@ -33,9 +34,7 @@ class UserCreditBase(ABC): @abstractmethod async def spend_credits( self, - user_id: str, - block_id: str, - input_data: BlockInput, + entry: NodeExecutionEntry, data_size: float, run_time: float, ) -> int: @@ -43,9 +42,7 @@ class UserCreditBase(ABC): Spend the credits for the user based on the block usage. Args: - user_id (str): The user ID. - block_id (str): The block ID. - input_data (BlockInput): The input data for the block. + entry (NodeExecutionEntry): The node execution identifiers & data. data_size (float): The size of the data being processed. run_time (float): The time taken to run the block. 
@@ -168,7 +165,6 @@ class UserCreditBase(ABC): transaction_type: CreditTransactionType, is_active: bool = True, transaction_key: str | None = None, - block_id: str | None = None, metadata: Json = Json({}), ): async with db.locked_transaction(f"usr_trx_{user_id}"): @@ -185,7 +181,6 @@ class UserCreditBase(ABC): "amount": amount, "runningBalance": user_balance + amount, "type": transaction_type, - "blockId": block_id, "metadata": metadata, "isActive": is_active, "createdAt": self.time_now(), @@ -251,29 +246,31 @@ class UserCredit(UserCreditBase): async def spend_credits( self, - user_id: str, - block_id: str, - input_data: BlockInput, + entry: NodeExecutionEntry, data_size: float, run_time: float, ) -> int: - block = get_block(block_id) + block = get_block(entry.block_id) if not block: - raise ValueError(f"Block not found: {block_id}") + raise ValueError(f"Block not found: {entry.block_id}") cost, matching_filter = self._block_usage_cost( - block=block, input_data=input_data, data_size=data_size, run_time=run_time + block=block, input_data=entry.data, data_size=data_size, run_time=run_time ) if cost == 0: return 0 await self._add_transaction( - user_id=user_id, + user_id=entry.user_id, amount=-cost, transaction_type=CreditTransactionType.USAGE, - block_id=block.id, metadata=Json( { + "graph_exec_id": entry.graph_exec_id, + "graph_id": entry.graph_id, + "node_id": entry.node_id, + "node_exec_id": entry.node_exec_id, + "block_id": entry.block_id, "block": block.name, "input": matching_filter, } @@ -292,28 +289,13 @@ class UserCredit(UserCreditBase): transaction_type=CreditTransactionType.TOP_UP, ) - @staticmethod - async def _get_stripe_customer_id(user_id: str) -> str: - user = await get_user_by_id(user_id) - if not user: - raise ValueError(f"User not found: {user_id}") - - if user.stripeCustomerId: - return user.stripeCustomerId - - customer = stripe.Customer.create(name=user.name or "", email=user.email) - await User.prisma().update( - where={"id": user_id}, 
data={"stripeCustomerId": customer.id} - ) - return customer.id - async def top_up_intent(self, user_id: str, amount: int) -> str: # Create checkout session # https://docs.stripe.com/checkout/quickstart?client=react # unit_amount param is always in the smallest currency unit (so cents for usd) # which is equal to amount of credits checkout_session = stripe.checkout.Session.create( - customer=await self._get_stripe_customer_id(user_id), + customer=await get_stripe_customer_id(user_id), line_items=[ { "price_data": { @@ -454,3 +436,18 @@ def get_user_credit_model() -> UserCreditBase: def get_block_costs() -> dict[str, list[BlockCost]]: return {block().id: costs for block, costs in BLOCK_COSTS.items()} + + +async def get_stripe_customer_id(user_id: str) -> str: + user = await get_user_by_id(user_id) + if not user: + raise ValueError(f"User not found: {user_id}") + + if user.stripeCustomerId: + return user.stripeCustomerId + + customer = stripe.Customer.create(name=user.name or "", email=user.email) + await User.prisma().update( + where={"id": user_id}, data={"stripeCustomerId": customer.id} + ) + return customer.id diff --git a/autogpt_platform/backend/backend/data/execution.py b/autogpt_platform/backend/backend/data/execution.py index 230c8e497..714ea0f51 100644 --- a/autogpt_platform/backend/backend/data/execution.py +++ b/autogpt_platform/backend/backend/data/execution.py @@ -1,9 +1,10 @@ from collections import defaultdict from datetime import datetime, timezone from multiprocessing import Manager -from typing import Any, AsyncGenerator, Generator, Generic, TypeVar +from typing import Any, AsyncGenerator, Generator, Generic, Optional, TypeVar from prisma.enums import AgentExecutionStatus +from prisma.errors import PrismaError from prisma.models import ( AgentGraphExecution, AgentNodeExecution, @@ -31,6 +32,7 @@ class NodeExecutionEntry(BaseModel): graph_id: str node_exec_id: str node_id: str + block_id: str data: BlockInput @@ -324,6 +326,30 @@ async def 
update_execution_status( return ExecutionResult.from_db(res) +async def get_execution( + execution_id: str, user_id: str +) -> Optional[AgentNodeExecution]: + """ + Get an execution by ID. Returns None if not found. + + Args: + execution_id: The ID of the execution to retrieve + + Returns: + The execution if found, None otherwise + """ + try: + execution = await AgentNodeExecution.prisma().find_unique( + where={ + "id": execution_id, + "userId": user_id, + } + ) + return execution + except PrismaError: + return None + + async def get_execution_results(graph_exec_id: str) -> list[ExecutionResult]: executions = await AgentNodeExecution.prisma().find_many( where={"agentGraphExecutionId": graph_exec_id}, diff --git a/autogpt_platform/backend/backend/executor/database.py b/autogpt_platform/backend/backend/executor/database.py index 108a089ac..1dee046cc 100644 --- a/autogpt_platform/backend/backend/executor/database.py +++ b/autogpt_platform/backend/backend/executor/database.py @@ -4,6 +4,7 @@ from typing import Any, Callable, Concatenate, Coroutine, ParamSpec, TypeVar, ca from backend.data.credit import get_user_credit_model from backend.data.execution import ( ExecutionResult, + NodeExecutionEntry, RedisExecutionEventBus, create_graph_execution, get_execution_results, @@ -79,7 +80,7 @@ class DatabaseManager(AppService): # Credits user_credit_model = get_user_credit_model() spend_credits = cast( - Callable[[Any, str, str, dict[str, str], float, float], int], + Callable[[Any, NodeExecutionEntry, float, float], int], exposed_run_and_wait(user_credit_model.spend_credits), ) diff --git a/autogpt_platform/backend/backend/executor/manager.py b/autogpt_platform/backend/backend/executor/manager.py index 33e3ecf67..9ea8c9f76 100644 --- a/autogpt_platform/backend/backend/executor/manager.py +++ b/autogpt_platform/backend/backend/executor/manager.py @@ -238,7 +238,8 @@ def execute_node( if res.end_time and res.start_time else 0 ) - db_client.spend_credits(user_id, node_block.id, 
input_data, s, t) + data.data = input_data + db_client.spend_credits(data, s, t) # Update execution stats if execution_stats is not None: @@ -257,7 +258,7 @@ def _enqueue_next_nodes( log_metadata: LogMetadata, ) -> list[NodeExecutionEntry]: def add_enqueued_execution( - node_exec_id: str, node_id: str, data: BlockInput + node_exec_id: str, node_id: str, block_id: str, data: BlockInput ) -> NodeExecutionEntry: exec_update = db_client.update_execution_status( node_exec_id, ExecutionStatus.QUEUED, data @@ -269,6 +270,7 @@ def _enqueue_next_nodes( graph_id=graph_id, node_exec_id=node_exec_id, node_id=node_id, + block_id=block_id, data=data, ) @@ -322,7 +324,12 @@ def _enqueue_next_nodes( # Input is complete, enqueue the execution. log_metadata.info(f"Enqueued {suffix}") enqueued_executions.append( - add_enqueued_execution(next_node_exec_id, next_node_id, next_node_input) + add_enqueued_execution( + node_exec_id=next_node_exec_id, + node_id=next_node_id, + block_id=next_node.block_id, + data=next_node_input, + ) ) # Next execution stops here if the link is not static. @@ -352,7 +359,12 @@ def _enqueue_next_nodes( continue log_metadata.info(f"Enqueueing static-link execution {suffix}") enqueued_executions.append( - add_enqueued_execution(iexec.node_exec_id, next_node_id, idata) + add_enqueued_execution( + node_exec_id=iexec.node_exec_id, + node_id=next_node_id, + block_id=next_node.block_id, + data=idata, + ) ) return enqueued_executions @@ -800,8 +812,8 @@ class ExecutionManager(AppService): # Extract request input data, and assign it to the input pin. 
if block.block_type == BlockType.INPUT: name = node.input_default.get("name") - if name and name in data: - input_data = {"value": data[name]} + if name in data.get("node_input", {}): + input_data = {"value": data["node_input"][name]} # Extract webhook payload, and assign it to the input pin webhook_payload_key = f"webhook_{node.webhook_id}_payload" @@ -837,6 +849,7 @@ class ExecutionManager(AppService): graph_id=node_exec.graph_id, node_exec_id=node_exec.node_exec_id, node_id=node_exec.node_id, + block_id=node_exec.block_id, data=node_exec.input_data, ) ) diff --git a/autogpt_platform/backend/backend/integrations/credentials_store.py b/autogpt_platform/backend/backend/integrations/credentials_store.py index 3aa7a7fb8..ee1c05bf1 100644 --- a/autogpt_platform/backend/backend/integrations/credentials_store.py +++ b/autogpt_platform/backend/backend/integrations/credentials_store.py @@ -23,6 +23,15 @@ from backend.util.settings import Settings settings = Settings() +# This is an override since ollama doesn't actually require an API key, but the credential system enforces one be attached +ollama_credentials = APIKeyCredentials( + id="744fdc56-071a-4761-b5a5-0af0ce10a2b5", + provider="ollama", + api_key=SecretStr("FAKE_API_KEY"), + title="Use Credits for Ollama", + expires_at=None, +) + revid_credentials = APIKeyCredentials( + id="fdb7f412-f519-48d1-9b5f-d2f73d0e01fe", + provider="revid", @@ -124,6 +133,7 @@ nvidia_credentials = APIKeyCredentials( DEFAULT_CREDENTIALS = [ + ollama_credentials, revid_credentials, ideogram_credentials, replicate_credentials, @@ -169,6 +179,10 @@ class IntegrationCredentialsStore: def get_all_creds(self, user_id: str) -> list[Credentials]: users_credentials = self._get_user_integrations(user_id).credentials all_credentials = users_credentials + # These will always be added + all_credentials.append(ollama_credentials) + + # These will only be added if the API key is set if settings.secrets.revid_api_key:
all_credentials.append(revid_credentials) if settings.secrets.ideogram_api_key: diff --git a/autogpt_platform/backend/backend/integrations/oauth/__init__.py b/autogpt_platform/backend/backend/integrations/oauth/__init__.py index ec45189c5..50f8a155a 100644 --- a/autogpt_platform/backend/backend/integrations/oauth/__init__.py +++ b/autogpt_platform/backend/backend/integrations/oauth/__init__.py @@ -2,6 +2,7 @@ from typing import TYPE_CHECKING from .github import GitHubOAuthHandler from .google import GoogleOAuthHandler +from .linear import LinearOAuthHandler from .notion import NotionOAuthHandler from .twitter import TwitterOAuthHandler @@ -17,6 +18,7 @@ HANDLERS_BY_NAME: dict["ProviderName", type["BaseOAuthHandler"]] = { GoogleOAuthHandler, NotionOAuthHandler, TwitterOAuthHandler, + LinearOAuthHandler, ] } # --8<-- [end:HANDLERS_BY_NAMEExample] diff --git a/autogpt_platform/backend/backend/integrations/oauth/linear.py b/autogpt_platform/backend/backend/integrations/oauth/linear.py new file mode 100644 index 000000000..fd9d379c1 --- /dev/null +++ b/autogpt_platform/backend/backend/integrations/oauth/linear.py @@ -0,0 +1,165 @@ +import json +from typing import Optional +from urllib.parse import urlencode + +from pydantic import SecretStr + +from backend.blocks.linear._api import LinearAPIException +from backend.data.model import APIKeyCredentials, OAuth2Credentials +from backend.integrations.providers import ProviderName +from backend.util.request import requests + +from .base import BaseOAuthHandler + + +class LinearOAuthHandler(BaseOAuthHandler): + """ + OAuth2 handler for Linear. 
+ """ + + PROVIDER_NAME = ProviderName.LINEAR + + def __init__(self, client_id: str, client_secret: str, redirect_uri: str): + self.client_id = client_id + self.client_secret = client_secret + self.redirect_uri = redirect_uri + self.auth_base_url = "https://linear.app/oauth/authorize" + self.token_url = "https://api.linear.app/oauth/token" # Correct token URL + self.revoke_url = "https://api.linear.app/oauth/revoke" + + def get_login_url( + self, scopes: list[str], state: str, code_challenge: Optional[str] + ) -> str: + + params = { + "client_id": self.client_id, + "redirect_uri": self.redirect_uri, + "response_type": "code", # Important: include "response_type" + "scope": ",".join(scopes), # Comma-separated, not space-separated + "state": state, + } + return f"{self.auth_base_url}?{urlencode(params)}" + + def exchange_code_for_tokens( + self, code: str, scopes: list[str], code_verifier: Optional[str] + ) -> OAuth2Credentials: + return self._request_tokens({"code": code, "redirect_uri": self.redirect_uri}) + + def revoke_tokens(self, credentials: OAuth2Credentials) -> bool: + if not credentials.access_token: + raise ValueError("No access token to revoke") + + headers = { + "Authorization": f"Bearer {credentials.access_token.get_secret_value()}" + } + + response = requests.post(self.revoke_url, headers=headers) + if not response.ok: + try: + error_data = response.json() + error_message = error_data.get("error", "Unknown error") + except json.JSONDecodeError: + error_message = response.text + raise LinearAPIException( + f"Failed to revoke Linear tokens ({response.status_code}): {error_message}", + response.status_code, + ) + + return True # Linear doesn't return JSON on successful revoke + + def _refresh_tokens(self, credentials: OAuth2Credentials) -> OAuth2Credentials: + if not credentials.refresh_token: + raise ValueError( + "No refresh token available." 
+ ) # Linear uses non-expiring tokens + + return self._request_tokens( + { + "refresh_token": credentials.refresh_token.get_secret_value(), + "grant_type": "refresh_token", + } + ) + + def _request_tokens( + self, + params: dict[str, str], + current_credentials: Optional[OAuth2Credentials] = None, + ) -> OAuth2Credentials: + request_body = { + "client_id": self.client_id, + "client_secret": self.client_secret, + "grant_type": "authorization_code", # Ensure grant_type is correct + **params, + } + + headers = { + "Content-Type": "application/x-www-form-urlencoded" + } # Correct header for token request + response = requests.post(self.token_url, data=request_body, headers=headers) + + if not response.ok: + try: + error_data = response.json() + error_message = error_data.get("error", "Unknown error") + + except json.JSONDecodeError: + error_message = response.text + raise LinearAPIException( + f"Failed to fetch Linear tokens ({response.status_code}): {error_message}", + response.status_code, + ) + + token_data = response.json() + + # Note: Linear access tokens do not expire, so we set expires_at to None + new_credentials = OAuth2Credentials( + provider=self.PROVIDER_NAME, + title=current_credentials.title if current_credentials else None, + username=token_data.get("user", {}).get( + "name", "Unknown User" + ), # extract name or set appropriate + access_token=token_data["access_token"], + scopes=token_data["scope"].split( + "," + ), # Linear returns comma-separated scopes + refresh_token=token_data.get( + "refresh_token" + ), # Linear uses non-expiring tokens so this might be null + access_token_expires_at=None, + refresh_token_expires_at=None, + ) + if current_credentials: + new_credentials.id = current_credentials.id + return new_credentials + + def _request_username(self, access_token: str) -> Optional[str]: + + # Use the LinearClient to fetch user details using GraphQL + from backend.blocks.linear._api import LinearClient + + try: + + linear_client = LinearClient( + 
APIKeyCredentials( + api_key=SecretStr(access_token), + title="temp", + provider=self.PROVIDER_NAME, + expires_at=None, + ) + ) # Temporary credentials for this request + + query = """ + query Viewer { + viewer { + name + } + } + """ + + response = linear_client.query(query) + return response["viewer"]["name"] + + except Exception as e: # Handle any errors + + print(f"Error fetching username: {e}") + return None diff --git a/autogpt_platform/backend/backend/integrations/providers.py b/autogpt_platform/backend/backend/integrations/providers.py index 6395bfe53..c8cebe0a5 100644 --- a/autogpt_platform/backend/backend/integrations/providers.py +++ b/autogpt_platform/backend/backend/integrations/providers.py @@ -17,6 +17,7 @@ class ProviderName(str, Enum): HUBSPOT = "hubspot" IDEOGRAM = "ideogram" JINA = "jina" + LINEAR = "linear" MEDIUM = "medium" NOTION = "notion" NVIDIA = "nvidia" diff --git a/autogpt_platform/backend/backend/server/external/api.py b/autogpt_platform/backend/backend/server/external/api.py new file mode 100644 index 000000000..3236766fd --- /dev/null +++ b/autogpt_platform/backend/backend/server/external/api.py @@ -0,0 +1,11 @@ +from fastapi import FastAPI + +from .routes.v1 import v1_router + +external_app = FastAPI( + title="AutoGPT External API", + description="External API for AutoGPT integrations", + docs_url="/docs", + version="1.0", +) +external_app.include_router(v1_router, prefix="/v1") diff --git a/autogpt_platform/backend/backend/server/external/middleware.py b/autogpt_platform/backend/backend/server/external/middleware.py new file mode 100644 index 000000000..2878e3d31 --- /dev/null +++ b/autogpt_platform/backend/backend/server/external/middleware.py @@ -0,0 +1,37 @@ +from fastapi import Depends, HTTPException, Request +from fastapi.security import APIKeyHeader +from prisma.enums import APIKeyPermission + +from backend.data.api_key import has_permission, validate_api_key + +api_key_header = APIKeyHeader(name="X-API-Key") + + +async def 
require_api_key(request: Request): + """Base middleware for API key authentication""" + api_key = await api_key_header(request) + + if api_key is None: + raise HTTPException(status_code=401, detail="Missing API key") + + api_key_obj = await validate_api_key(api_key) + + if not api_key_obj: + raise HTTPException(status_code=401, detail="Invalid API key") + + request.state.api_key = api_key_obj + return api_key_obj + + +def require_permission(permission: APIKeyPermission): + """Dependency function for checking specific permissions""" + + async def check_permission(api_key=Depends(require_api_key)): + if not has_permission(api_key, permission): + raise HTTPException( + status_code=403, + detail=f"API key missing required permission: {permission}", + ) + return api_key + + return check_permission diff --git a/autogpt_platform/backend/backend/server/external/routes/__init__.py b/autogpt_platform/backend/backend/server/external/routes/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/autogpt_platform/backend/backend/server/external/routes/v1.py b/autogpt_platform/backend/backend/server/external/routes/v1.py new file mode 100644 index 000000000..ecc64a603 --- /dev/null +++ b/autogpt_platform/backend/backend/server/external/routes/v1.py @@ -0,0 +1,111 @@ +import logging +from collections import defaultdict +from typing import Any, Sequence + +from autogpt_libs.utils.cache import thread_cached +from fastapi import APIRouter, Depends, HTTPException +from prisma.enums import APIKeyPermission + +import backend.data.block +from backend.data import execution as execution_db +from backend.data import graph as graph_db +from backend.data.api_key import APIKey +from backend.data.block import BlockInput, CompletedBlockOutput +from backend.executor import ExecutionManager +from backend.server.external.middleware import require_permission +from backend.util.service import get_service_client +from backend.util.settings import Settings + + +@thread_cached +def 
execution_manager_client() -> ExecutionManager: + return get_service_client(ExecutionManager) + + +settings = Settings() +logger = logging.getLogger(__name__) + +v1_router = APIRouter() + + +@v1_router.get( + path="/blocks", + tags=["blocks"], + dependencies=[Depends(require_permission(APIKeyPermission.READ_BLOCK))], +) +def get_graph_blocks() -> Sequence[dict[Any, Any]]: + blocks = [block() for block in backend.data.block.get_blocks().values()] + return [b.to_dict() for b in blocks] + + +@v1_router.post( + path="/blocks/{block_id}/execute", + tags=["blocks"], + dependencies=[Depends(require_permission(APIKeyPermission.EXECUTE_BLOCK))], +) +def execute_graph_block( + block_id: str, + data: BlockInput, + api_key: APIKey = Depends(require_permission(APIKeyPermission.EXECUTE_BLOCK)), +) -> CompletedBlockOutput: + obj = backend.data.block.get_block(block_id) + if not obj: + raise HTTPException(status_code=404, detail=f"Block #{block_id} not found.") + + output = defaultdict(list) + for name, data in obj.execute(data): + output[name].append(data) + return output + + +@v1_router.post( + path="/graphs/{graph_id}/execute", + tags=["graphs"], +) +def execute_graph( + graph_id: str, + node_input: dict[Any, Any], + api_key: APIKey = Depends(require_permission(APIKeyPermission.EXECUTE_GRAPH)), +) -> dict[str, Any]: + try: + graph_exec = execution_manager_client().add_execution( + graph_id, node_input, user_id=api_key.user_id + ) + return {"id": graph_exec.graph_exec_id} + except Exception as e: + msg = e.__str__().encode().decode("unicode_escape") + raise HTTPException(status_code=400, detail=msg) + + +@v1_router.get( + path="/graphs/{graph_id}/executions/{graph_exec_id}/results", + tags=["graphs"], +) +async def get_graph_execution_results( + graph_id: str, + graph_exec_id: str, + api_key: APIKey = Depends(require_permission(APIKeyPermission.READ_GRAPH)), +) -> dict: + graph = await graph_db.get_graph(graph_id, user_id=api_key.user_id) + if not graph: + raise 
HTTPException(status_code=404, detail=f"Graph #{graph_id} not found.") + + results = await execution_db.get_execution_results(graph_exec_id) + + return { + "execution_id": graph_exec_id, + "nodes": [ + { + "node_id": result.node_id, + "input": ( + result.input_data.get("value") + if "value" in result.input_data + else result.input_data + ), + "output": result.output_data.get( + "response", result.output_data.get("result", []) + ), + } + for result in results + ], + } diff --git a/autogpt_platform/backend/backend/server/integrations/router.py b/autogpt_platform/backend/backend/server/integrations/router.py index c7ef4a500..9a5267e2f 100644 --- a/autogpt_platform/backend/backend/server/integrations/router.py +++ b/autogpt_platform/backend/backend/server/integrations/router.py @@ -105,6 +105,11 @@ def callback( logger.debug(f"Received credentials with final scopes: {credentials.scopes}") + # Linear returns scopes as a single string with spaces, so we need to split them + # TODO: make a bypass of this part of the OAuth handler + if len(credentials.scopes) == 1 and " " in credentials.scopes[0]: + credentials.scopes = credentials.scopes[0].split(" ") + # Check if the granted scopes are sufficient for the requested scopes if not set(scopes).issubset(set(credentials.scopes)): # For now, we'll just log the warning and continue diff --git a/autogpt_platform/backend/backend/server/rest_api.py b/autogpt_platform/backend/backend/server/rest_api.py index c5be1c179..b5124e0c0 100644 --- a/autogpt_platform/backend/backend/server/rest_api.py +++ b/autogpt_platform/backend/backend/server/rest_api.py @@ -20,6 +20,7 @@ import backend.server.v2.library.routes import backend.server.v2.store.routes import backend.util.service import backend.util.settings +from backend.server.external.api import external_app settings = backend.util.settings.Settings() logger = logging.getLogger(__name__) @@ -94,6 +95,8 @@ app.include_router( backend.server.v2.library.routes.router, tags=["v2"], 
prefix="/api/library" ) +app.mount("/external-api", external_app) + @app.get(path="/health", tags=["health"], dependencies=[]) async def health(): diff --git a/autogpt_platform/backend/backend/server/routers/v1.py b/autogpt_platform/backend/backend/server/routers/v1.py index 1728630a3..2b1b76d65 100644 --- a/autogpt_platform/backend/backend/server/routers/v1.py +++ b/autogpt_platform/backend/backend/server/routers/v1.py @@ -29,7 +29,11 @@ from backend.data.api_key import ( update_api_key_permissions, ) from backend.data.block import BlockInput, CompletedBlockOutput -from backend.data.credit import get_block_costs, get_user_credit_model +from backend.data.credit import ( + get_block_costs, + get_stripe_customer_id, + get_user_credit_model, +) from backend.data.user import get_or_create_user from backend.executor import ExecutionManager, ExecutionScheduler, scheduler from backend.integrations.creds_manager import IntegrationCredentialsManager @@ -186,6 +190,21 @@ async def stripe_webhook(request: Request): return Response(status_code=200) +@v1_router.get(path="/credits/manage", dependencies=[Depends(auth_middleware)]) +async def manage_payment_method( + user_id: Annotated[str, Depends(get_user_id)], +) -> dict[str, str]: + session = stripe.billing_portal.Session.create( + customer=await get_stripe_customer_id(user_id), + return_url=settings.config.platform_base_url + "/store/credits", + ) + if not session: + raise HTTPException( + status_code=400, detail="Failed to create billing portal session" + ) + return {"url": session.url} + + ######################################################## ##################### Graphs ########################### ######################################################## @@ -594,7 +613,6 @@ def get_execution_schedules( tags=["api-keys"], dependencies=[Depends(auth_middleware)], ) -@feature_flag("api-keys-enabled") async def create_api_key( request: CreateAPIKeyRequest, user_id: Annotated[str, Depends(get_user_id)] ) -> 
CreateAPIKeyResponse: @@ -618,7 +636,6 @@ async def create_api_key( tags=["api-keys"], dependencies=[Depends(auth_middleware)], ) -@feature_flag("api-keys-enabled") async def get_api_keys( user_id: Annotated[str, Depends(get_user_id)] ) -> list[APIKeyWithoutHash]: @@ -636,7 +653,6 @@ async def get_api_keys( tags=["api-keys"], dependencies=[Depends(auth_middleware)], ) -@feature_flag("api-keys-enabled") async def get_api_key( key_id: str, user_id: Annotated[str, Depends(get_user_id)] ) -> APIKeyWithoutHash: diff --git a/autogpt_platform/backend/backend/util/settings.py b/autogpt_platform/backend/backend/util/settings.py index 80ec82d23..24f3ca6d3 100644 --- a/autogpt_platform/backend/backend/util/settings.py +++ b/autogpt_platform/backend/backend/util/settings.py @@ -316,6 +316,9 @@ class Secrets(UpdateTrackingModel["Secrets"], BaseSettings): e2b_api_key: str = Field(default="", description="E2B API key") nvidia_api_key: str = Field(default="", description="Nvidia API key") + linear_client_id: str = Field(default="", description="Linear client ID") + linear_client_secret: str = Field(default="", description="Linear client secret") + stripe_api_key: str = Field(default="", description="Stripe API Key") stripe_webhook_secret: str = Field(default="", description="Stripe Webhook Secret") diff --git a/autogpt_platform/backend/migrations/20250115382614_reshape_transaction_metadata_column/migration.sql b/autogpt_platform/backend/migrations/20250115382614_reshape_transaction_metadata_column/migration.sql new file mode 100644 index 000000000..c6952d769 --- /dev/null +++ b/autogpt_platform/backend/migrations/20250115382614_reshape_transaction_metadata_column/migration.sql @@ -0,0 +1,35 @@ +/* + Warnings: + + - You are about to drop the column `blockId` on the `CreditTransaction` table. All the data in the column will be moved to metadata->block_id. 
+ +*/ +BEGIN; + +-- DropForeignKey blockId +ALTER TABLE "CreditTransaction" DROP CONSTRAINT "CreditTransaction_blockId_fkey"; + +-- Update migrate blockId into metadata->"block_id" +UPDATE "CreditTransaction" +SET "metadata" = jsonb_set( + COALESCE("metadata"::jsonb, '{}'), + '{block_id}', + to_jsonb("blockId") +) +WHERE "blockId" IS NOT NULL; + +-- AlterTable drop blockId +ALTER TABLE "CreditTransaction" DROP COLUMN "blockId"; + +COMMIT; + +/* + These indices dropped below were part of the cleanup during the schema change applied above. + These indexes were not useful and will not impact anything upon their removal. +*/ + +-- DropIndex +DROP INDEX "StoreListingReview_storeListingVersionId_idx"; + +-- DropIndex +DROP INDEX "StoreListingSubmission_Status_idx"; diff --git a/autogpt_platform/backend/schema.prisma b/autogpt_platform/backend/schema.prisma index 6d3322240..a284886d9 100644 --- a/autogpt_platform/backend/schema.prisma +++ b/autogpt_platform/backend/schema.prisma @@ -32,12 +32,12 @@ model User { AgentPreset AgentPreset[] UserAgent UserAgent[] - Profile Profile[] - StoreListing StoreListing[] - StoreListingReview StoreListingReview[] - StoreListingSubmission StoreListingSubmission[] - APIKeys APIKey[] - IntegrationWebhooks IntegrationWebhook[] + Profile Profile[] + StoreListing StoreListing[] + StoreListingReview StoreListingReview[] + StoreListingSubmission StoreListingSubmission[] + APIKeys APIKey[] + IntegrationWebhooks IntegrationWebhook[] @@index([id]) @@index([email]) @@ -64,23 +64,23 @@ model AgentGraph { AgentNodes AgentNode[] AgentGraphExecution AgentGraphExecution[] - AgentPreset AgentPreset[] - UserAgent UserAgent[] - StoreListing StoreListing[] - StoreListingVersion StoreListingVersion? + AgentPreset AgentPreset[] + UserAgent UserAgent[] + StoreListing StoreListing[] + StoreListingVersion StoreListingVersion? 
@@id(name: "graphVersionId", [id, version]) @@index([userId, isActive]) } -//////////////////////////////////////////////////////////// -//////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////// //////////////// USER SPECIFIC DATA //////////////////// -//////////////////////////////////////////////////////////// -//////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////// // An AgentPrest is an Agent + User Configuration of that agent. -// For example, if someone has created a weather agent and they want to set it up to +// For example, if someone has created a weather agent and they want to set it up to // Inform them of extreme weather warnings in Texas, the agent with the configuration to set it to // monitor texas, along with the cron setup or webhook tiggers, is an AgentPreset model AgentPreset { @@ -102,9 +102,9 @@ model AgentPreset { agentVersion Int Agent AgentGraph @relation(fields: [agentId, agentVersion], references: [id, version], onDelete: Cascade) - InputPresets AgentNodeExecutionInputOutput[] @relation("AgentPresetsInputData") - UserAgents UserAgent[] - AgentExecution AgentGraphExecution[] + InputPresets AgentNodeExecutionInputOutput[] @relation("AgentPresetsInputData") + UserAgents UserAgent[] + AgentExecution AgentGraphExecution[] @@index([userId]) } @@ -134,11 +134,11 @@ model UserAgent { @@index([userId]) } -//////////////////////////////////////////////////////////// -//////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////// //////// AGENT DEFINITION AND EXECUTION TABLES //////// -//////////////////////////////////////////////////////////// 
-//////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////// // This model describes a single node in the Agent Graph/Flow (Multi Agent System). model AgentNode { @@ -207,7 +207,6 @@ model AgentBlock { // Prisma requires explicit back-references. ReferencedByAgentNode AgentNode[] - CreditTransaction CreditTransaction[] } // This model describes the status of an AgentGraphExecution or AgentNodeExecution. @@ -345,11 +344,11 @@ model AnalyticsDetails { @@index([type]) } -//////////////////////////////////////////////////////////// -//////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////// ////////////// METRICS TRACKING TABLES //////////////// -//////////////////////////////////////////////////////////// -//////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////// model AnalyticsMetrics { id String @id @default(uuid()) createdAt DateTime @default(now()) @@ -375,11 +374,11 @@ enum CreditTransactionType { USAGE } -//////////////////////////////////////////////////////////// -//////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////// //////// ACCOUNTING AND CREDIT SYSTEM TABLES ////////// -//////////////////////////////////////////////////////////// -//////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////// model CreditTransaction { transactionKey String @default(uuid()) createdAt DateTime @default(now()) @@ -387,9 +386,6 @@ model CreditTransaction { 
userId String user User @relation(fields: [userId], references: [id], onDelete: Cascade) - blockId String? - block AgentBlock? @relation(fields: [blockId], references: [id]) - amount Int type CreditTransactionType @@ -402,11 +398,11 @@ model CreditTransaction { @@index([userId, createdAt]) } -//////////////////////////////////////////////////////////// -//////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////// ////////////// Store TABLES /////////////////////////// -//////////////////////////////////////////////////////////// -//////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////// model Profile { id String @id @default(uuid()) @@ -414,7 +410,7 @@ model Profile { updatedAt DateTime @default(now()) @updatedAt // Only 1 of user or group can be set. - // The user this profile belongs to, if any. + // The user this profile belongs to, if any. userId String? User User? @relation(fields: [userId], references: [id], onDelete: Cascade) @@ -528,7 +524,7 @@ model StoreListingVersion { agentVersion Int Agent AgentGraph @relation(fields: [agentId, agentVersion], references: [id, version]) - // The detials for this version of the agent, this allows the author to update the details of the agent, + // The details for this version of the agent, this allows the author to update the details of the agent, // But still allow using old versions of the agent with there original details. // TODO: Create a database view that shows only the latest version of each store listing. slug String @@ -573,7 +569,6 @@ model StoreListingReview { comments String? @@unique([storeListingVersionId, reviewByUserId]) - @@index([storeListingVersionId]) } enum SubmissionStatus { @@ -601,7 +596,6 @@ model StoreListingSubmission { reviewComments String? 
@@index([storeListingId]) - @@index([Status]) } enum APIKeyPermission { diff --git a/autogpt_platform/backend/test/data/test_credit.py b/autogpt_platform/backend/test/data/test_credit.py index 007c382d5..9007cfea5 100644 --- a/autogpt_platform/backend/test/data/test_credit.py +++ b/autogpt_platform/backend/test/data/test_credit.py @@ -5,6 +5,7 @@ from prisma.models import CreditTransaction from backend.blocks.llm import AITextGeneratorBlock from backend.data.credit import BetaUserCredit +from backend.data.execution import NodeExecutionEntry from backend.data.user import DEFAULT_USER_ID from backend.integrations.credentials_store import openai_credentials from backend.util.test import SpinTestServer @@ -24,25 +25,37 @@ async def test_block_credit_usage(server: SpinTestServer): current_credit = await user_credit.get_credits(DEFAULT_USER_ID) spending_amount_1 = await user_credit.spend_credits( - DEFAULT_USER_ID, - AITextGeneratorBlock().id, - { - "model": "gpt-4-turbo", - "credentials": { - "id": openai_credentials.id, - "provider": openai_credentials.provider, - "type": openai_credentials.type, + NodeExecutionEntry( + user_id=DEFAULT_USER_ID, + graph_id="test_graph", + node_id="test_node", + graph_exec_id="test_graph_exec", + node_exec_id="test_node_exec", + block_id=AITextGeneratorBlock().id, + data={ + "model": "gpt-4-turbo", + "credentials": { + "id": openai_credentials.id, + "provider": openai_credentials.provider, + "type": openai_credentials.type, + }, }, - }, + ), 0.0, 0.0, ) assert spending_amount_1 > 0 spending_amount_2 = await user_credit.spend_credits( - DEFAULT_USER_ID, - AITextGeneratorBlock().id, - {"model": "gpt-4-turbo", "api_key": "owned_api_key"}, + NodeExecutionEntry( + user_id=DEFAULT_USER_ID, + graph_id="test_graph", + node_id="test_node", + graph_exec_id="test_graph_exec", + node_exec_id="test_node_exec", + block_id=AITextGeneratorBlock().id, + data={"model": "gpt-4-turbo", "api_key": "owned_api_key"}, + ), 0.0, 0.0, ) diff --git 
a/autogpt_platform/backend/test/executor/test_manager.py b/autogpt_platform/backend/test/executor/test_manager.py index 9bcd04a08..1f69defc8 100644 --- a/autogpt_platform/backend/test/executor/test_manager.py +++ b/autogpt_platform/backend/test/executor/test_manager.py @@ -125,7 +125,7 @@ async def test_agent_execution(server: SpinTestServer): logger.info("Starting test_agent_execution") test_user = await create_test_user() test_graph = await create_graph(server, create_test_graph(), test_user) - data = {"input_1": "Hello", "input_2": "World"} + data = {"node_input": {"input_1": "Hello", "input_2": "World"}} graph_exec_id = await execute_graph( server.agent_server, test_graph, diff --git a/autogpt_platform/backend/test/test_data_creator.py b/autogpt_platform/backend/test/test_data_creator.py index 1f79386cc..d5235043d 100644 --- a/autogpt_platform/backend/test/test_data_creator.py +++ b/autogpt_platform/backend/test/test_data_creator.py @@ -298,7 +298,6 @@ async def main(): data={ "transactionKey": str(faker.uuid4()), "userId": user.id, - "blockId": block.id, "amount": random.randint(1, 100), "type": ( prisma.enums.CreditTransactionType.TOP_UP diff --git a/autogpt_platform/frontend/src/app/profile/page.tsx b/autogpt_platform/frontend/src/app/profile/page.tsx index 26f96d917..a4fb51266 100644 --- a/autogpt_platform/frontend/src/app/profile/page.tsx +++ b/autogpt_platform/frontend/src/app/profile/page.tsx @@ -98,6 +98,7 @@ export default function PrivatePage() { // This contains ids for built-in "Use Credits for X" credentials const hiddenCredentials = useMemo( () => [ + "744fdc56-071a-4761-b5a5-0af0ce10a2b5", // Ollama "fdb7f412-f519-48d1-9b5f-d2f73d0e01fe", // Revid "760f84fc-b270-42de-91f6-08efe1b512d0", // Ideogram "6b9fc200-4726-4973-86c9-cd526f5ce5db", // Replicate diff --git a/autogpt_platform/frontend/src/app/store/(user)/credits/page.tsx b/autogpt_platform/frontend/src/app/store/(user)/credits/page.tsx index f4a8e34a0..3a02adc07 100644 --- 
a/autogpt_platform/frontend/src/app/store/(user)/credits/page.tsx +++ b/autogpt_platform/frontend/src/app/store/(user)/credits/page.tsx @@ -2,14 +2,15 @@ import { Button } from "@/components/agptui/Button"; import useCredits from "@/hooks/useCredits"; import { useBackendAPI } from "@/lib/autogpt-server-api/context"; -import { useSearchParams } from "next/navigation"; +import { useSearchParams, useRouter } from "next/navigation"; import { useEffect, useState } from "react"; export default function CreditsPage() { - const { credits, requestTopUp } = useCredits(); + const { requestTopUp } = useCredits(); const [amount, setAmount] = useState(5); const [patched, setPatched] = useState(false); const searchParams = useSearchParams(); + const router = useRouter(); const topupStatus = searchParams.get("topup"); const api = useBackendAPI(); @@ -20,54 +21,84 @@ export default function CreditsPage() { } }, [api, patched, topupStatus]); + const openBillingPortal = async () => { + const portal = await api.getUserPaymentPortalLink(); + router.push(portal.url); + }; + return (

Credits

-

- Current credits: {credits} -

-

- Top-up Credits -

-

- {topupStatus === "success" && ( - - Your payment was successful. Your credits will be updated shortly. - - )} - {topupStatus === "cancel" && ( - - Payment failed. Your payment method has not been charged. - - )} -

-
- -
- setAmount(parseInt(e.target.value))} - /> + +
+ {/* Left Column */} +
+

Top-up Credits

+ +

+ {topupStatus === "success" && ( + + Your payment was successful. Your credits will be updated + shortly. + + )} + {topupStatus === "cancel" && ( + + Payment failed. Your payment method has not been charged. + + )} +

+ +
+ +
+ setAmount(parseInt(e.target.value))} + /> +
+
+ + +
+ + {/* Right Column */} +
+

Manage Your Payment Methods

+
+

+ You can manage your cards and see your payment history in the + billing portal. +

+
+ +
-
); } diff --git a/autogpt_platform/frontend/src/app/store/(user)/integrations/page.tsx b/autogpt_platform/frontend/src/app/store/(user)/integrations/page.tsx index a22cbaa57..330b470f8 100644 --- a/autogpt_platform/frontend/src/app/store/(user)/integrations/page.tsx +++ b/autogpt_platform/frontend/src/app/store/(user)/integrations/page.tsx @@ -98,6 +98,7 @@ export default function PrivatePage() { // This contains ids for built-in "Use Credits for X" credentials const hiddenCredentials = useMemo( () => [ + "744fdc56-071a-4761-b5a5-0af0ce10a2b5", // Ollama "fdb7f412-f519-48d1-9b5f-d2f73d0e01fe", // Revid "760f84fc-b270-42de-91f6-08efe1b512d0", // Ideogram "6b9fc200-4726-4973-86c9-cd526f5ce5db", // Replicate diff --git a/autogpt_platform/frontend/src/components/integrations/credentials-input.tsx b/autogpt_platform/frontend/src/components/integrations/credentials-input.tsx index 65873c975..340fc6cb4 100644 --- a/autogpt_platform/frontend/src/components/integrations/credentials-input.tsx +++ b/autogpt_platform/frontend/src/components/integrations/credentials-input.tsx @@ -14,6 +14,7 @@ import { FaGoogle, FaMedium, FaKey, + FaHubspot, } from "react-icons/fa"; import { FC, useMemo, useState } from "react"; import { @@ -66,6 +67,7 @@ export const providerIcons: Record< google_maps: FaGoogle, jina: fallbackIcon, ideogram: fallbackIcon, + linear: fallbackIcon, medium: FaMedium, ollama: fallbackIcon, openai: fallbackIcon, @@ -81,7 +83,7 @@ export const providerIcons: Record< twitter: FaTwitter, unreal_speech: fallbackIcon, exa: fallbackIcon, - hubspot: fallbackIcon, + hubspot: FaHubspot, }; // --8<-- [end:ProviderIconsEmbed] diff --git a/autogpt_platform/frontend/src/components/integrations/credentials-provider.tsx b/autogpt_platform/frontend/src/components/integrations/credentials-provider.tsx index 437d0e1d9..ab8f0a5a9 100644 --- a/autogpt_platform/frontend/src/components/integrations/credentials-provider.tsx +++ 
b/autogpt_platform/frontend/src/components/integrations/credentials-provider.tsx @@ -30,6 +30,7 @@ const providerDisplayNames: Record = { hubspot: "Hubspot", ideogram: "Ideogram", jina: "Jina", + linear: "Linear", medium: "Medium", notion: "Notion", nvidia: "Nvidia", diff --git a/autogpt_platform/frontend/src/hooks/useAgentGraph.ts b/autogpt_platform/frontend/src/hooks/useAgentGraph.ts index a21280f13..c2910f2c5 100644 --- a/autogpt_platform/frontend/src/hooks/useAgentGraph.ts +++ b/autogpt_platform/frontend/src/hooks/useAgentGraph.ts @@ -862,6 +862,7 @@ export default function useAgentGraph( title: "Error saving agent", description: errorMessage, }); + setSaveRunRequest({ request: "save", state: "error" }); } }, [_saveAgent, toast]); diff --git a/autogpt_platform/frontend/src/lib/autogpt-server-api/client.ts b/autogpt_platform/frontend/src/lib/autogpt-server-api/client.ts index 0950ff92d..b813afca4 100644 --- a/autogpt_platform/frontend/src/lib/autogpt-server-api/client.ts +++ b/autogpt_platform/frontend/src/lib/autogpt-server-api/client.ts @@ -93,6 +93,10 @@ export default class BackendAPI { return this._request("POST", "/credits", { amount }); } + getUserPaymentPortalLink(): Promise<{ url: string }> { + return this._get("/credits/manage"); + } + fulfillCheckout(): Promise { return this._request("PATCH", "/credits"); } diff --git a/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts b/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts index aa8545faa..dc046fd58 100644 --- a/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts +++ b/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts @@ -119,6 +119,7 @@ export const PROVIDER_NAMES = { HUBSPOT: "hubspot", IDEOGRAM: "ideogram", JINA: "jina", + LINEAR: "linear", MEDIUM: "medium", NOTION: "notion", NVIDIA: "nvidia", diff --git a/docs/content/imgs/ollama/Ollama-Select-Llama32.png b/docs/content/imgs/ollama/Ollama-Select-Llama32.png index fc9b58d6a..cd39e1b4e 100644 Binary files 
a/docs/content/imgs/ollama/Ollama-Select-Llama32.png and b/docs/content/imgs/ollama/Ollama-Select-Llama32.png differ diff --git a/docs/content/platform/ollama.md b/docs/content/platform/ollama.md index 3a26a6717..9076b333d 100644 --- a/docs/content/platform/ollama.md +++ b/docs/content/platform/ollama.md @@ -45,13 +45,7 @@ Now that both Ollama and the AutoGPT platform are running we can move onto using 2. In the "LLM Model" dropdown, select "llama3.2" (This is the model we downloaded earlier) ![Select Ollama Model](../imgs/ollama/Ollama-Select-Llama32.png) -3. You will see it ask for "Ollama Credentials", simply press "Enter API key" - ![Ollama Credentials](../imgs/ollama/Ollama-Enter-API-key.png) - - And you will see "Add new API key for Ollama", In the API key field you can enter anything you want as Ollama does not require an API key, I usually just enter a space, for the Name call it "Ollama" then press "Save & use this API key" - ![Ollama Credentials](../imgs/ollama/Ollama-Credentials.png) - -4. After that you will now see the block again, add your prompts then save and then run the graph: +3. Now we need to add some prompts then save and then run the graph: ![Add Prompt](../imgs/ollama/Ollama-Add-Prompts.png) That's it! You've successfully setup the AutoGPT platform and made a LLM call to Ollama.