update type annotation

pull/1580/head
youkaichao 2023-04-15 22:04:05 +08:00
parent 1073954fb7
commit afd2c5e2c6
3 changed files with 14 additions and 13 deletions
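All three files get the same mechanical edit: built-in generics in annotations and docstrings (tuple[...], list[...]) are swapped for their typing-module aliases (Tuple[...], List[...]). The commit message does not say why, but the usual motivation is Python 3.8 support: built-in types only became subscriptable in annotations at runtime with Python 3.9 (PEP 585). A minimal sketch of the difference, assuming a 3.8 interpreter:

    from typing import Tuple

    # Python 3.8 rejects built-in generics at function definition time:
    #     def make() -> tuple[int, str]: ...
    #     TypeError: 'type' object is not subscriptable
    # The typing aliases work on 3.8 and earlier (back to 3.5):
    def make() -> Tuple[int, str]:
        return 0, "ok"

An alternative on 3.7+ is "from __future__ import annotations", which defers annotation evaluation, but the explicit typing imports used here also cover the non-annotation uses in docstrings and keep the change local.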

View File

@@ -14,7 +14,7 @@ class AgentManager(metaclass=Singleton):
     # Create new GPT agent
     # TODO: Centralise use of create_chat_completion() to globally enforce token limit
-    def create_agent(self, task: str, prompt: str, model: str) -> tuple[int, str]:
+    def create_agent(self, task: str, prompt: str, model: str) -> Tuple[int, str]:
         """Create a new agent and return its key

         Args:
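A hypothetical call site for the method above; the signature comes from the diff, while reading the returned Tuple[int, str] as (key, first reply) follows the docstring's "return its key" and is an assumption:

    manager = AgentManager()  # singleton, per the metaclass shown above
    key, reply = manager.create_agent(
        task="summarise text",                   # placeholder task (assumed)
        prompt="You are a summarisation agent",  # placeholder prompt (assumed)
        model="gpt-3.5-turbo",                   # placeholder model name (assumed)
    )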

View File

@@ -72,7 +72,7 @@ def get_response(
         timeout (int): The timeout for the HTTP request

     Returns:
-        tuple[None, str] | tuple[Response, None]: The response and error message
+        Tuple[None, str] | Tuple[Response, None]: The response and error message

     Raises:
         ValueError: If the URL is invalid
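The annotated return is a (response, error) pair in which exactly one element is None. The hunk header truncates the full signature of get_response, so the positional URL argument below is an assumption; only the timeout parameter and the return shape are documented above:

    response, error = get_response("https://example.com", timeout=10)
    if error is not None:
        print(f"request failed: {error}")
    else:
        print(response.status_code)  # assuming Response is requests.Response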

View File

@@ -11,12 +11,13 @@ from selenium.webdriver.chrome.options import Options
 import logging
 from pathlib import Path
 from autogpt.config import Config
+from typing import List, Tuple, Union

 FILE_DIR = Path(__file__).parent.parent
 CFG = Config()


-def browse_website(url: str, question: str) -> tuple[str, WebDriver]:
+def browse_website(url: str, question: str) -> Tuple[str, WebDriver]:
     """Browse a website and return the answer and links to the user

     Args:
@@ -24,7 +25,7 @@ def browse_website(url: str, question: str) -> tuple[str, WebDriver]:
         question (str): The question asked by the user

     Returns:
-        tuple[str, WebDriver]: The answer and links to the user and the webdriver
+        Tuple[str, WebDriver]: The answer and links to the user and the webdriver
     """
     driver, text = scrape_text_with_selenium(url)
     add_header(driver)
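A hypothetical end-to-end call, combining the signature above with the close_browser helper that appears in a later hunk of this file; the URL and question are placeholders:

    answer, driver = browse_website("https://example.com", "What is this page about?")
    print(answer)          # "Answer gathered from website: ... Links: ..."
    close_browser(driver)  # quits the WebDriver returned alongside the answer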
@@ -38,14 +39,14 @@ def browse_website(url: str, question: str) -> tuple[str, WebDriver]:
     return f"Answer gathered from website: {summary_text} \n \n Links: {links}", driver


-def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]:
+def scrape_text_with_selenium(url: str) -> Tuple[WebDriver, str]:
     """Scrape text from a website using selenium

     Args:
         url (str): The url of the website to scrape

     Returns:
-        tuple[WebDriver, str]: The webdriver and the text scraped from the website
+        Tuple[WebDriver, str]: The webdriver and the text scraped from the website
     """

     logging.getLogger("selenium").setLevel(logging.CRITICAL)
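The body of scrape_text_with_selenium is elided between hunks. As background for the Options import shown in this file's import block, here is a self-contained sketch of headless Chrome scraping using standard Selenium 4 calls; none of it is taken from this diff, and it assumes chromedriver is on PATH:

    from selenium import webdriver
    from selenium.webdriver.chrome.options import Options

    options = Options()
    options.add_argument("--headless")          # no browser window
    driver = webdriver.Chrome(options=options)
    driver.get("https://example.com")
    print(driver.page_source[:200])             # raw HTML, the input BeautifulSoup parses below
    driver.quit()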
@@ -77,14 +78,14 @@ def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]:
     return driver, text


-def scrape_links_with_selenium(driver: WebDriver) -> list[str]:
+def scrape_links_with_selenium(driver: WebDriver) -> List[str]:
     """Scrape links from a website using selenium

     Args:
         driver (WebDriver): The webdriver to use to scrape the links

     Returns:
-        list[str]: The links scraped from the website
+        List[str]: The links scraped from the website
     """
     page_source = driver.page_source
     soup = BeautifulSoup(page_source, "html.parser")
@@ -109,26 +110,26 @@ def close_browser(driver: WebDriver) -> None:
     driver.quit()


-def extract_hyperlinks(soup: BeautifulSoup) -> list[tuple[str, str]]:
+def extract_hyperlinks(soup: BeautifulSoup) -> List[Tuple[str, str]]:
     """Extract hyperlinks from a BeautifulSoup object

     Args:
         soup (BeautifulSoup): The BeautifulSoup object to extract the hyperlinks from

     Returns:
-        list[tuple[str, str]]: The hyperlinks extracted from the BeautifulSoup object
+        List[Tuple[str, str]]: The hyperlinks extracted from the BeautifulSoup object
     """
     return [(link.text, link["href"]) for link in soup.find_all("a", href=True)]


-def format_hyperlinks(hyperlinks: list[tuple[str, str]]) -> list[str]:
+def format_hyperlinks(hyperlinks: List[Tuple[str, str]]) -> List[str]:
     """Format hyperlinks to be displayed to the user

     Args:
-        hyperlinks (list[tuple[str, str]]): The hyperlinks to format
+        hyperlinks (List[Tuple[str, str]]): The hyperlinks to format

     Returns:
-        list[str]: The formatted hyperlinks
+        List[str]: The formatted hyperlinks
     """
     return [f"{link_text} ({link_url})" for link_text, link_url in hyperlinks]