fix linting

pull/2531/head
BillSchumacher 2023-04-16 22:56:34 -05:00
parent 81c65af560
commit 708374d95b
3 changed files with 51 additions and 27 deletions


@@ -74,9 +74,9 @@ def main() -> None:
 # this is particularly important for indexing and referencing pinecone memory
 memory = get_memory(cfg, init=True)
 logger.typewriter_log(
-f"Using memory of type:", Fore.GREEN, f"{memory.__class__.__name__}"
+"Using memory of type:", Fore.GREEN, f"{memory.__class__.__name__}"
 )
-logger.typewriter_log(f"Using Browser:", Fore.GREEN, cfg.selenium_web_browser)
+logger.typewriter_log("Using Browser:", Fore.GREEN, cfg.selenium_web_browser)
 system_prompt = ai_config.construct_full_prompt()
 if cfg.debug_mode:
 logger.typewriter_log("Prompt:", Fore.GREEN, system_prompt)


@@ -19,18 +19,25 @@ class Agent:
 memory: The memory object to use.
 full_message_history: The full message history.
 next_action_count: The number of actions to execute.
-system_prompt: The system prompt is the initial prompt that defines everything the AI needs to know to achieve its task successfully.
-Currently, the dynamic and customizable information in the system prompt are ai_name, description and goals.
+system_prompt: The system prompt is the initial prompt that defines everything
+the AI needs to know to achieve its task successfully.
+Currently, the dynamic and customizable information in the system prompt are
+ai_name, description and goals.
-triggering_prompt: The last sentence the AI will see before answering. For Auto-GPT, this prompt is:
-Determine which next command to use, and respond using the format specified above:
-The triggering prompt is not part of the system prompt because between the system prompt and the triggering
-prompt we have contextual information that can distract the AI and make it forget that its goal is to find the next task to achieve.
+triggering_prompt: The last sentence the AI will see before answering.
+For Auto-GPT, this prompt is:
+Determine which next command to use, and respond using the format specified
+above:
+The triggering prompt is not part of the system prompt because between the
+system prompt and the triggering
+prompt we have contextual information that can distract the AI and make it
+forget that its goal is to find the next task to achieve.
 SYSTEM PROMPT
 CONTEXTUAL INFORMATION (memory, previous conversations, anything relevant)
 TRIGGERING PROMPT
-The triggering prompt reminds the AI about its short term meta task (defining the next task)
+The triggering prompt reminds the AI about its short term meta task
+(defining the next task)
 """

 def __init__(
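
To make the prompt layout described in the docstring concrete (system prompt, then contextual information, then the triggering prompt as the last thing the model sees), here is a minimal sketch; build_prompt_messages and the role names are illustrative assumptions, not Auto-GPT's actual helper:

def build_prompt_messages(
    system_prompt: str,
    context_messages: list[dict],
    triggering_prompt: str,
) -> list[dict]:
    # Hypothetical helper: system prompt first, contextual information in the
    # middle, and the triggering prompt as the final message.
    messages = [{"role": "system", "content": system_prompt}]
    messages.extend(context_messages)  # memory hits, earlier conversation turns, etc.
    messages.append({"role": "user", "content": triggering_prompt})
    return messages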
@@ -96,14 +103,13 @@ class Agent:
 try:
 print_assistant_thoughts(self.ai_name, assistant_reply_json)
 command_name, arguments = get_command(assistant_reply_json)
-# command_name, arguments = assistant_reply_json_valid["command"]["name"], assistant_reply_json_valid["command"]["args"]
 if cfg.speak_mode:
 say_text(f"I want to execute {command_name}")
 except Exception as e:
 logger.error("Error: \n", str(e))
 if not cfg.continuous_mode and self.next_action_count == 0:
-### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
+# ### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
 # Get key press: Prompt the user to press enter to continue or escape
 # to exit
 logger.typewriter_log(
@ -177,10 +183,13 @@ class Agent:
command_name, arguments = plugin.pre_command(
command_name, arguments
)
result = (
f"Command {command_name} returned: "
f"{execute_command(self.command_registry, command_name, arguments, self.config.prompt_generator)}"
command_result = execute_command(
self.command_registry,
command_name,
arguments,
self.config.prompt_generator,
)
result = f"Command {command_name} returned: " f"{command_result}"
for plugin in cfg.plugins:
result = plugin.post_command(command_name, result)
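
The refactor above leaves the plugin hook pattern unchanged: each plugin can rewrite the command before it runs and post-process the textual result afterwards. A minimal sketch of that wrapping, assuming only the pre_command and post_command signatures visible in the diff (run_with_plugins itself is hypothetical):

def run_with_plugins(plugins, command_name, arguments, execute):
    # Give every plugin a chance to rewrite the command and its arguments.
    for plugin in plugins:
        command_name, arguments = plugin.pre_command(command_name, arguments)
    # Execute the (possibly rewritten) command and build the result string.
    result = f"Command {command_name} returned: {execute(command_name, arguments)}"
    # Let every plugin post-process the textual result.
    for plugin in plugins:
        result = plugin.post_command(command_name, result)
    return result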


@@ -9,16 +9,20 @@ Code Analysis
 Objective:
 The objective of the "scrape_text" function is to scrape the text content from
-a given URL and return it as a string, after removing any unwanted HTML tags and scripts.
+a given URL and return it as a string, after removing any unwanted HTML tags and
+scripts.
 Inputs:
 - url: a string representing the URL of the webpage to be scraped.
 Flow:
-1. Send a GET request to the given URL using the requests library and the user agent header from the config file.
+1. Send a GET request to the given URL using the requests library and the user agent
+header from the config file.
 2. Check if the response contains an HTTP error. If it does, return an error message.
-3. Use BeautifulSoup to parse the HTML content of the response and extract all script and style tags.
-4. Get the text content of the remaining HTML using the get_text() method of BeautifulSoup.
+3. Use BeautifulSoup to parse the HTML content of the response and extract all script
+and style tags.
+4. Get the text content of the remaining HTML using the get_text() method of
+BeautifulSoup.
 5. Split the text into lines and then into chunks, removing any extra whitespace.
 6. Join the chunks into a single string with newline characters between them.
 7. Return the cleaned text.
@@ -27,9 +31,12 @@ Outputs:
 - A string representing the cleaned text content of the webpage.
 Additional aspects:
-- The function uses the requests library and BeautifulSoup to handle the HTTP request and HTML parsing, respectively.
-- The function removes script and style tags from the HTML to avoid including unwanted content in the text output.
-- The function uses a generator expression to split the text into lines and chunks, which can improve performance for large amounts of text.
+- The function uses the requests library and BeautifulSoup to handle the HTTP request
+and HTML parsing, respectively.
+- The function removes script and style tags from the HTML to avoid including unwanted
+content in the text output.
+- The function uses a generator expression to split the text into lines and chunks,
+which can improve performance for large amounts of text.
 """
@@ -40,26 +47,33 @@ class TestScrapeText:
 expected_text = "This is some sample text"
 mock_response = mocker.Mock()
 mock_response.status_code = 200
-mock_response.text = f"<html><body><div><p style='color: blue;'>{expected_text}</p></div></body></html>"
+mock_response.text = (
+"<html><body><div><p style='color: blue;'>"
+f"{expected_text}</p></div></body></html>"
+)
 mocker.patch("requests.Session.get", return_value=mock_response)
-# Call the function with a valid URL and assert that it returns the expected text
+# Call the function with a valid URL and assert that it returns the
+# expected text
 url = "http://www.example.com"
 assert scrape_text(url) == expected_text
-# Tests that the function returns an error message when an invalid or unreachable url is provided.
+# Tests that the function returns an error message when an invalid or unreachable
+# url is provided.
 def test_invalid_url(self, mocker):
 # Mock the requests.get() method to raise an exception
 mocker.patch(
 "requests.Session.get", side_effect=requests.exceptions.RequestException
 )
-# Call the function with an invalid URL and assert that it returns an error message
+# Call the function with an invalid URL and assert that it returns an error
+# message
 url = "http://www.invalidurl.com"
 error_message = scrape_text(url)
 assert "Error:" in error_message
-# Tests that the function returns an empty string when the html page contains no text to be scraped.
+# Tests that the function returns an empty string when the html page contains no
+# text to be scraped.
 def test_no_text(self, mocker):
 # Mock the requests.get() method to return a response with no text
 mock_response = mocker.Mock()
@@ -71,7 +85,8 @@ class TestScrapeText:
 url = "http://www.example.com"
 assert scrape_text(url) == ""
-# Tests that the function returns an error message when the response status code is an http error (>=400).
+# Tests that the function returns an error message when the response status code is
+# an http error (>=400).
 def test_http_error(self, mocker):
 # Mock the requests.get() method to return a response with a 404 status code
 mocker.patch("requests.Session.get", return_value=mocker.Mock(status_code=404))