Created September 10, 2023 01:07
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException, TimeoutException, WebDriverException
import pandas as pd
import time
import logging

# Configure logging
logging.basicConfig(level=logging.INFO)


def scrape_websites(test_mode=False):
    """Scrape the visible body text of every URL listed in urls.csv."""
    driver = webdriver.Firefox()

    # Load and clean the URL list: trim whitespace, drop trailing commas and duplicates.
    url_df = pd.read_csv("urls.csv")
    url_df['url'] = url_df['url'].str.strip().str.rstrip(',')
    url_df.drop_duplicates(subset='url', inplace=True)

    if test_mode:
        # Limit to the first 10 URLs for a quick test run.
        url_df = url_df.head(10)

    rows = []
    for _, row in url_df.iterrows():
        url = row['url']
        logging.info(f"Scraping {url}")
        main_content = ''
        try:
            driver.get(url)
            time.sleep(2)  # Give the page a moment to render.
            try:
                main_content = driver.find_element(By.TAG_NAME, "body").text
            except NoSuchElementException:
                logging.warning(f"Body element not found for {url}")
            if not main_content:
                logging.warning(f"No content found for {url}")
        except TimeoutException:
            logging.error(f"Timeout occurred while trying to access {url}")
        except WebDriverException as e:
            logging.error(f"WebDriverException occurred while scraping {url}: {e}")
        except Exception as e:
            logging.error(f"An unspecified error occurred while scraping {url}: {e}")
        # Record the URL even when scraping failed, with whatever content was captured.
        rows.append({'URL': url, 'Main_Content': main_content})

    driver.quit()
    # Build the result DataFrame once at the end instead of appending row by row
    # (the original used the private DataFrame._append, which is not a public pandas API).
    return pd.DataFrame(rows, columns=['URL', 'Main_Content'])


df = scrape_websites(test_mode=False)
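The script assumes a urls.csv file with a 'url' column in the working directory; the gist does not show that file or how the resulting DataFrame is persisted, so the sketch below is only one plausible way to run it (the urls.csv layout and the scraped_content.csv output name are assumptions, not part of the original script).

# Hypothetical usage sketch, not part of the original gist.
# urls.csv is assumed to contain a single 'url' column, e.g.
#
#   url
#   https://example.com
#   https://example.org
#
# Do a small test pass over the first 10 URLs, then persist the results.
df = scrape_websites(test_mode=True)
df.to_csv("scraped_content.csv", index=False)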