Implemented Selenium based web browsing. #1397

Merged: 4 commits, Apr 14, 2023
19 changes: 3 additions & 16 deletions autogpt/commands.py
@@ -13,7 +13,7 @@
from duckduckgo_search import ddg
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError

+from autogpt.web import browse_website

cfg = Config()


@@ -60,7 +60,8 @@ def execute_command(command_name, arguments):

        # Check if the Google API key is set and use the official search method
        # If the API key is not set or contains only whitespace, use the unofficial search method
-       if cfg.google_api_key and (cfg.google_api_key.strip() if cfg.google_api_key else None):
+       key = cfg.google_api_key
+       if key and key.strip() and key != "your-google-api-key":
            return google_official_search(arguments["input"])
        else:
            return google_search(arguments["input"])
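
Outside the diff, a minimal sketch of the new key check's behavior: unset, whitespace-only, and template-placeholder keys all fall back to the unofficial search. The is_valid_google_key helper and the sample key are illustrative, not part of this PR.

def is_valid_google_key(key):
    # Unset, whitespace-only, or the template placeholder all count as "no key"
    return bool(key and key.strip() and key != "your-google-api-key")

assert not is_valid_google_key(None)
assert not is_valid_google_key("   ")
assert not is_valid_google_key("your-google-api-key")
assert is_valid_google_key("AIza-example-key")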
@@ -174,20 +175,6 @@ def google_official_search(query, num_results=8):
    return search_results_links


-def browse_website(url, question):
-    """Browse a website and return the summary and links"""
-    summary = get_text_summary(url, question)
-    links = get_hyperlinks(url)
-
-    # Limit links to 5
-    if len(links) > 5:
-        links = links[:5]
-
-    result = f"""Website Content Summary: {summary}\n\nLinks: {links}"""
-
-    return result


def get_text_summary(url, question):
    """Return a summary of the text at the given URL, focused on the question"""
    text = browse.scrape_text(url)
29 changes: 29 additions & 0 deletions autogpt/js/overlay.js
@@ -0,0 +1,29 @@
// Full-screen overlay shown while AutoGPT reads the page
const overlay = document.createElement('div');
Object.assign(overlay.style, {
    position: 'fixed',
    zIndex: 999999,
    top: 0,
    left: 0,
    width: '100%',
    height: '100%',
    background: 'rgba(0, 0, 0, 0.7)',
    color: '#fff',
    fontSize: '24px',
    fontWeight: 'bold',
    display: 'flex',
    justifyContent: 'center',
    alignItems: 'center',
});
const textContent = document.createElement('div');
Object.assign(textContent.style, {
    textAlign: 'center',
});
textContent.textContent = 'AutoGPT Analyzing Page';
overlay.appendChild(textContent);
document.body.append(overlay);
document.body.style.overflow = 'hidden';

// Animate up to three trailing dots, cycling once per second
let dotCount = 0;
setInterval(() => {
    textContent.textContent = 'AutoGPT Analyzing Page' + '.'.repeat(dotCount);
    dotCount = (dotCount + 1) % 4;
}, 1000);
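
For context, autogpt/web.py (added below) pushes this script into the loaded page with Selenium's execute_script. A minimal Python sketch; the inject_overlay name is illustrative, and the js/ path is assumed to mirror this PR's layout:

from pathlib import Path

def inject_overlay(driver, js_dir=Path(__file__).parent / "js"):
    # Evaluate overlay.js in the context of the page the driver has loaded
    driver.execute_script((js_dir / "overlay.js").read_text())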
67 changes: 67 additions & 0 deletions autogpt/summary.py
@@ -0,0 +1,67 @@
from autogpt.llm_utils import create_chat_completion


def summarize_text(driver, text, question):
    if not text:
        return "Error: No text to summarize"

    text_length = len(text)
    print(f"Text length: {text_length} characters")

    summaries = []
    chunks = list(split_text(text))

    scroll_ratio = 1 / len(chunks)
    for i, chunk in enumerate(chunks):
        scroll_to_percentage(driver, scroll_ratio * i)
        print(f"Summarizing chunk {i + 1} / {len(chunks)}")
        messages = [create_message(chunk, question)]

        summary = create_chat_completion(
            model="gpt-3.5-turbo",
            messages=messages,
            max_tokens=300,
        )
        summaries.append(summary)

    print(f"Summarized {len(chunks)} chunks.")

    combined_summary = "\n".join(summaries)
    messages = [create_message(combined_summary, question)]

    return create_chat_completion(
        model="gpt-3.5-turbo",
        messages=messages,
        max_tokens=300,
    )


def split_text(text, max_length=8192):
    paragraphs = text.split("\n")
    current_length = 0
    current_chunk = []

    for paragraph in paragraphs:
        if current_length + len(paragraph) + 1 <= max_length:
            current_chunk.append(paragraph)
            current_length += len(paragraph) + 1
        else:
            yield "\n".join(current_chunk)
            current_chunk = [paragraph]
            current_length = len(paragraph) + 1

    if current_chunk:
        yield "\n".join(current_chunk)


def create_message(chunk, question):
    return {
        "role": "user",
        "content": f"\"\"\"{chunk}\"\"\" Using the above text, please answer the following question: \"{question}\" -- if the question cannot be answered using the text, please summarize the text."
    }

Review thread on the prompt above:

Contributor: Do we need “please” in the prompt?

Reply: we must be kind to the algos


def scroll_to_percentage(driver, ratio):
    if ratio < 0 or ratio > 1:
        raise ValueError("Percentage should be between 0 and 1")
    driver.execute_script(f"window.scrollTo(0, document.body.scrollHeight * {ratio});")
86 changes: 86 additions & 0 deletions autogpt/web.py
@@ -0,0 +1,86 @@
import json
import logging
import os
from pathlib import Path

from bs4 import BeautifulSoup
from duckduckgo_search import ddg
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service as ChromeService
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from webdriver_manager.chrome import ChromeDriverManager

import autogpt.summary as summary
from autogpt.config import Config

file_dir = Path(__file__).parent
cfg = Config()


def browse_website(url, question):
    driver, text = scrape_text_with_selenium(url)
    add_header(driver)
    summary_text = summary.summarize_text(driver, text, question)
    links = scrape_links_with_selenium(driver)

    # Limit links to 5
    if len(links) > 5:
        links = links[:5]
    close_browser(driver)
    return f"Answer gathered from website: {summary_text} \n \n Links: {links}", driver


def scrape_text_with_selenium(url):
    logging.getLogger('selenium').setLevel(logging.CRITICAL)

    options = Options()
    options.add_argument(
        'user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
        ' (KHTML, like Gecko) Chrome/112.0.5615.49 Safari/537.36'
    )
    driver = webdriver.Chrome(
        executable_path=ChromeDriverManager().install(), options=options
    )
    driver.get(url)

    WebDriverWait(driver, 10).until(
        EC.presence_of_element_located((By.TAG_NAME, 'body'))
    )

    # Get the HTML content directly from the browser's DOM
    page_source = driver.execute_script("return document.body.outerHTML;")
    soup = BeautifulSoup(page_source, "html.parser")

    for script in soup(["script", "style"]):
        script.extract()

    text = soup.get_text()
    # Collapse whitespace: strip each line, then split on double spaces
    lines = (line.strip() for line in text.splitlines())
    chunks = (phrase.strip() for line in lines for phrase in line.split("  "))
    text = '\n'.join(chunk for chunk in chunks if chunk)
    return driver, text


def scrape_links_with_selenium(driver):
    page_source = driver.page_source
    soup = BeautifulSoup(page_source, "html.parser")

    for script in soup(["script", "style"]):
        script.extract()

    hyperlinks = extract_hyperlinks(soup)

    return format_hyperlinks(hyperlinks)


def close_browser(driver):
    driver.quit()


def extract_hyperlinks(soup):
    return [(link.text, link['href']) for link in soup.find_all('a', href=True)]


def format_hyperlinks(hyperlinks):
    return [f"{link_text} ({link_url})" for link_text, link_url in hyperlinks]


def add_header(driver):
    driver.execute_script(open(f"{file_dir}/js/overlay.js", "r").read())
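
Taken together, a minimal usage sketch of the new module; the URL and question are placeholders, and Chrome plus a configured OpenAI key are assumed:

from autogpt.web import browse_website

# Launches Chrome via webdriver-manager, overlays the "Analyzing Page" banner,
# scrolls through the page while summarizing each chunk, and returns the
# answer text plus up to five links (and the already-closed driver).
result, driver = browse_website("https://example.com", "What is this page about?")
print(result)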