Limits the number of links that a webpage can return.

pull/10/head
Torantulino 2023-03-30 12:45:15 +01:00
parent d78613e558
commit 23f19a8611
2 changed files with 9 additions and 11 deletions

View File

@@ -34,7 +34,7 @@ def format_hyperlinks(hyperlinks):
formatted_links = []
for link_text, link_url in hyperlinks:
formatted_links.append(f"{link_text} ({link_url})")
return '\n'.join(formatted_links)
return formatted_links
def scrape_links(url):
response = requests.get(url)
@@ -49,14 +49,8 @@ def scrape_links(url):
script.extract()
hyperlinks = extract_hyperlinks(soup)
text = soup.get_text()
lines = (line.strip() for line in text.splitlines())
chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
text = '\n'.join(chunk for chunk in chunks if chunk)
text = format_hyperlinks(hyperlinks)
return text
return format_hyperlinks(hyperlinks)
def split_text(text, max_length=8192):
paragraphs = text.split("\n")

View File

@@ -80,6 +80,10 @@ def browse_website(url):
summary = get_text_summary(url)
links = get_hyperlinks(url)
# Limit links to 5
if len(links) > 5:
links = links[:5]
result = f"""Website Content Summary: {summary}\n\nLinks: {links}"""
return result
@@ -90,8 +94,8 @@ def get_text_summary(url):
return """ "Result" : """ + summary
def get_hyperlinks(url):
text = browse.scrape_links(url)
return text
link_list = browse.scrape_links(url)
return link_list
def check_news(source):
print("Checking news from BBC world instead of " + source)