web_scrap/web_scrap.py

#!/usr/bin/python3
# Python 3
# Extract the links from a web page
import os
from urllib.parse import urlparse

import requests
from bs4 import BeautifulSoup
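
# Configuration: where the local copy is written and which site to mirror.
# URL holds the bare host name; "https://" is prepended wherever requests are made.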
BACKUP_DIR = "backup"
URL = "www.clarissariviere.com"


def mkdir_path(path_dir):
    """Create path_dir, along with any missing intermediate directory."""
    if not os.path.exists(path_dir):
        parents = []
        for part in path_dir.split("/"):
            parents.append(part)
            subpath = "/".join(parents)
            # Create each missing level; skip empty segments coming from
            # leading or doubled slashes.
            if subpath and not os.path.exists(subpath):
                os.mkdir(subpath)


mkdir_path(BACKUP_DIR)
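# Note: os.makedirs(path_dir, exist_ok=True) from the standard library is an
# equivalent one-call alternative to mkdir_path.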


# Fetch the home page and collect the category links from the top menu.
page = requests.get("https://{0}".format(URL))
page_url = []

if page.status_code == 200:
    soup = BeautifulSoup(page.text, 'html.parser')
    ul = soup.find_all("ul", id="listsmooth")
    for anchor in ul[0].find_all("a"):
        href = anchor.get('href', '/')
        if href != "#":
            page_url.append(href)

webpage = []  # article URLs collected from every archive page
# Walk each category: read the pager's "last page" link to find how many
# archive pages exist (10 posts per page), then visit every one of them.
for i in page_url:
    page = requests.get(i)
    if page.status_code == 200:
        print("page : {0}".format(i))
        soup = BeautifulSoup(page.text, 'html.parser')
        class_div = soup.find_all("div", class_="pagingfirstline")
        if len(class_div) > 0:
            pagingfirstline = class_div[0].find_all("a")
            if len(pagingfirstline) > 1:
                # The last pager link looks like ".../p<N>-10.html"; <N> is
                # the offset of the last page, so N // 10 is the page count.
                lastpage = pagingfirstline[-1].get("href", "/")
                element_lastpage = lastpage.split("/")[-1]
                number_page = element_lastpage.split("-")[0].split("p")[1]
                number_lastpage = int(number_page) // 10
                # range() is exclusive at the top, so add 1 to reach the last page.
                for j in range(1, number_lastpage + 1):
                    paging = j * 10
                    # Rebuild the paginated URL, keeping the category path
                    # segment (/archives/<categorie>/...) when there is one.
                    categorie = urlparse(i).path.split("/")
                    url_paging = "https://{0}/archives/p{1}-10.html".format(URL, paging)
                    if len(categorie) > 2:
                        url_paging = "https://{0}/archives/{1}/p{2}-10.html".format(URL, categorie[2], paging)
                    print(url_paging)
                    page = requests.get(url_paging)
                    if page.status_code == 200:
                        soup = BeautifulSoup(page.text, 'html.parser')
                        # Each post title is an <h2> wrapping a link to the article.
                        h2 = soup.find_all("h2")
                        for title in h2:
                            href = title.find_all("a")[0].get("href", "/")
                            if href not in webpage:
                                webpage.append(href)

# Recreate each article's directory tree under BACKUP_DIR.
for i in webpage:
    o = urlparse(i)
    path_web = o.path.split("/")
    path_web.pop()  # drop the file name, keep only the directory part
    dir_page_web = "/".join(path_web)
    mkdir_path("{0}/{1}".format(BACKUP_DIR, dir_page_web))