feat: 1.0.0
parent 248340badb
commit ff1401761d

scrape.py (41 changed lines)
@@ -1,22 +1,14 @@
-"""
-title: Scrape Recursively
-author: Mark Bailey
-author_url: https://git.markbailey.dev
-git_url: https://git.markbailey.dev/cerbervs/scrape.git
-description: Scrapes a website recursively using requests and BeautifulSoup.
-version: 1.0.0
-licence: MIT
-"""
-
 import json
+import websocket
 import requests
 from urllib.parse import urlparse as parse_url
 from bs4 import BeautifulSoup
 from pydantic import BaseModel, ConfigDict
 
 
+DEFAULT_REQUEST_LIMIT = 50
 class Tools:
-    req_limit: int = 100
+    req_limit: int = DEFAULT_REQUEST_LIMIT
 
     class RecursiveScraper(BaseModel):
         citation: bool = True
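Taken together with the hunks below, this change replaces the three hard-coded request limits in the old file (100 here, 5 and 10 further down) with one module-level default. A minimal sketch of the resulting pattern, trimmed down from the tool itself:

DEFAULT_REQUEST_LIMIT = 50


class Tools:
    # Class attribute and constructor default both read the module-level
    # constant, so the limit is changed in exactly one place.
    req_limit: int = DEFAULT_REQUEST_LIMIT

    def __init__(self, req_limit: int = DEFAULT_REQUEST_LIMIT):
        self.req_limit = req_limit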
@@ -24,11 +16,11 @@ class Tools:
         netloc: str | None = None
         scheme: str | None = None
         data: dict = {}
-        req_limit: int = 5
+        req_limit: int = DEFAULT_REQUEST_LIMIT
 
         model_config = ConfigDict(arbitrary_types_allowed=True)
 
-        def __init__(self, req_limit: int = 5):
+        def __init__(self, req_limit: int = DEFAULT_REQUEST_LIMIT):
             super().__init__()
             self.req_limit = req_limit
 
@@ -52,14 +44,22 @@ class Tools:
             if cleaned_url in self.visited:
                 return {}
 
-            # Send GET request
-            response = requests.get(cleaned_url)
-            if not response.ok:
+            # Try to send GET request using WebSocket
+            try:
+                ws = websocket.create_connection(f"ws://{self.netloc}", timeout=1)
+                ws.send(json.dumps({"url": cleaned_url}))
+                response = ws.recv()
+                ws.close()
+            except Exception:
+                # Fall back to requests library if WebSocket fails
+                response = requests.get(cleaned_url).text
+
+            if not response:
                 self.visited.append(cleaned_url)
                 raise Exception("Failed to fetch URL: " + cleaned_url)
 
             # Parse HTML content using BeautifulSoup and lxml parser
-            soup = BeautifulSoup(response.text, "lxml")
+            soup = BeautifulSoup(response, "lxml")
             data = self.extract_data(soup)
 
             # Mark URL as visited
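The core behavioural change of this hunk: the scraper now asks a WebSocket endpoint for the page first and only falls back to a plain HTTP GET when that fails. A self-contained sketch of that fetch path, assuming the websocket-client package and a hypothetical ws:// service on the target's netloc that answers a JSON {"url": ...} message with the page HTML (the surrounding class and recursion are left out):

import json

import requests
import websocket


def fetch_html(netloc: str, url: str) -> str:
    # Prefer the WebSocket endpoint; any failure (connection refused, timeout,
    # bad reply) silently degrades to a direct requests.get().
    try:
        ws = websocket.create_connection(f"ws://{netloc}", timeout=1)
        ws.send(json.dumps({"url": url}))
        html = ws.recv()
        ws.close()
    except Exception:
        html = requests.get(url).text
    if not html:
        raise Exception("Failed to fetch URL: " + url)
    return html

Because the except clause catches everything, a missing or slow WebSocket service costs at most the one-second timeout before the requests fallback kicks in, and the "if not response" check still guards against an empty reply.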
@@ -146,7 +146,7 @@ class Tools:
 
             return data
 
-    def __init__(self, req_limit: int = 10):
+    def __init__(self, req_limit: int = DEFAULT_REQUEST_LIMIT):
         """
         Initializes the Tools class.
         :params req_limit: The number of requests to be made to scrape the website.
@@ -171,7 +171,6 @@ class Tools:
 
         return json.dumps(data)
 
-
 if __name__ == "__main__":
-    url = "https://pkg.go.dev/github.com/go-chi/chi/v5"
-    print(Tools(req_limit=10).scrape_recursively(url))
+    tools = Tools()
+    print(tools.scrape_recursively("https://pkg.go.dev/github.com/go-chi/chi/v5"))
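The entry point now relies on the shared default instead of passing req_limit=10 explicitly. A small usage sketch of what that implies, assuming it runs in the same module as the tool:

# Hypothetical quick check: a bare Tools() now picks up the module default.
tools = Tools()
assert tools.req_limit == DEFAULT_REQUEST_LIMIT == 50

# scrape_recursively returns a JSON string (see json.dumps(data) above).
print(tools.scrape_recursively("https://pkg.go.dev/github.com/go-chi/chi/v5"))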