web_scrap/web_scrap.py

#!/usr/bin/python3
# Python 3
# Extract the links from a web page
from bs4 import BeautifulSoup
from urllib.parse import urlparse
import requests
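
# Fetch the home page of the site to crawl (the scheme is prepended when building the request URL)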
URL = "www.clarissariviere.com"
page = requests.get("https://{0}".format(URL))
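
# Collect the links of the "listsmooth" navigation menu, skipping bare "#" anchors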
page_url = []
if page.status_code == 200:
    soup = BeautifulSoup(page.text, 'html.parser')
    ul = soup.find_all("ul", id="listsmooth")
    for anchor in ul[0].find_all("a"):
        href = anchor.get('href', '/')
        if href != "#":
            page_url.append(href)
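
# Visit each menu link, follow its pagination and collect every article link found in the <h2> titles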
webpage = []
for i in page_url:
    page = requests.get(i)
    o = urlparse(i)
    print(o.path)
    if page.status_code == 200:
        print("page : {0}".format(i))
        soup = BeautifulSoup(page.text, 'html.parser')
        # The pagination bar sits in a div with the "pagingfirstline" class
        class_div = soup.find_all("div", class_="pagingfirstline")
        if len(class_div) > 0:
            pagingfirstline = class_div[0].find_all("a")
            if len(pagingfirstline) > 1:
                # The last pagination link looks like ".../p<N>-10.html";
                # extract <N> to know how far the paging goes
                lastpage = pagingfirstline[-1].get("href", "/")
                element_lastpage = lastpage.split("/")[-1]
                number_page = element_lastpage.split("-")[0].split("p")[1]
                # Articles are listed 10 per page
                number_lastpage = int(number_page) / 10
                for j in range(1, int(number_lastpage)):
                    paging = j * 10
                    # When the link carries a category in its path, insert it into the paginated archive URL
                    categorie = urlparse(i).path.split("/")
                    url_paging = "https://{0}/archives/p{1}-10.html".format(URL, paging)
                    if len(categorie) > 2:
                        url_paging = "https://{0}/archives/{1}/p{2}-10.html".format(URL, categorie[2], paging)
                    print(url_paging)
                    page = requests.get(url_paging)
                    if page.status_code == 200:
                        soup = BeautifulSoup(page.text, 'html.parser')
                        # Each article title is an <h2> wrapping a link to the article
                        h2 = soup.find_all("h2")
                        for title in h2:
                            href = title.find_all("a")[0].get("href", "/")
                            if href not in webpage:
                                webpage.append(href)
print(webpage)