#!/usr/bin/python3
# web_scrap/lib/WPExport.py
from bs4 import BeautifulSoup
from urllib.parse import urlparse
import os
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry

class WPExport:
    def __init__(self, name="Thread-0", url="", logger=None, parser="html.parser", directory="backup"):
        self._url = url
        self._logger = logger
        self._parser = parser
        self._dir = directory
        self._name = name

        # Retry transient failures (rate limiting and 5xx errors) instead
        # of giving up on the first error.
        self._request = requests.Session()
        retries = Retry(total=10,
                        status_forcelist=[429, 500, 502, 503, 504],
                        backoff_factor=2)
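        # With backoff_factor=2, urllib3 backs off exponentially between
        # attempts (on the order of 2 s, 4 s, 8 s, ..., capped by the
        # library; the exact schedule depends on the urllib3 version), so
        # a temporarily unavailable host is retried for several minutes.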
        adapter = HTTPAdapter(max_retries=retries)
        self._request.mount('http://', adapter)
        # getUrlPage rewrites every URL to https, so the retry adapter must
        # be mounted for that scheme as well.
        self._request.mount('https://', adapter)

    # Destructor
    def __del__(self):
        if self._logger is not None:
            self._logger.info("{0} : Export finished for {1}".format(self._name, self._url))

    # Public methods

    # Set the thread name from a zero-based thread index
    def setName(self, name):
        self._name = "Thread-{0}".format(int(name) + 1)

    # Set URL
    def setUrl(self, url):
        self._url = url

    # Download JS
    def downloadJs(self):
        script = self._getScriptCss(True, False)
        o = urlparse(self._url)
        self._downloadPage(script, "{0}/{1}/{2}".format(self._dir, o.path, "dists/js"))

    # Download CSS
    def downloadCss(self):
        css = self._getScriptCss(False, True)
        o = urlparse(self._url)
        self._downloadPage(css, "{0}/{1}/{2}".format(self._dir, o.path, "dists/css"))

    # Download HTML
    def downloadHTML(self, webpage):
        self._downloadPage(webpage, self._dir)

    # Download images
    def downloadImg(self, webpage):
        page_src = self._getImg(webpage)
        o = urlparse(self._url)
        self._downloadPage(page_src, "{0}/{1}/{2}".format(self._dir, o.path, "img"))

    # Get URLs: collect the listing pages and article URLs of the site,
    # limited to this thread's share of the paginated archives
    def getUrlPage(self, index_thread, max_thread):
        try:
            page = self._request.get(self._url)
        except Exception as err:
            self._logger.error("{0} : Connection error : {1}".format(self._name, err))
            exit(1)
        page_url = []
        if page.status_code == 200:
            soup = BeautifulSoup(page.text, self._parser)
            # The site menu is the <ul id="listsmooth"> element: collect
            # every entry except the "#" placeholder.
            ul = soup.find_all("ul", id="listsmooth")
            if len(ul) > 0:
                for anchor in ul[0].find_all("a"):
                    href = anchor.get('href', '/')
                    if href != "#":
                        page_url.append(href)
        else:
            self._logger.error("{0} : URL not retrieved due to status code : {1}".format(self._name, page.status_code))
            self._logger.debug("{0} : {1}".format(self._name, page.content))

        webpage = {"principal": {"page": [], "article": []}, "publications": {"page": [], "article": []}}
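        # Result layout: "principal" holds the menu entry equal to the base
        # URL, "publications" the other menu entries; each maps "page" to
        # listing pages and "article" to article URLs.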
        for i in page_url:
            section = "publications"
            # Normalize scheme-relative menu URLs to https.
            o = urlparse(i)
            o = o._replace(scheme="https")
            i = o.geturl().replace(":///", "://")
            if i == "{0}/".format(self._url):
                section = "principal"
            try:
                page = self._request.get(i)
            except Exception as err:
                self._logger.error("{0} : Connection error : {1}".format(self._name, err))
                exit(1)
            if page.status_code == 200:
                self._logger.info("{0} : page : {1}".format(self._name, i))
                if i not in webpage[section]["page"]:
                    webpage[section]["page"].append(i)
                soup = BeautifulSoup(page.text, self._parser)
                class_div = soup.find_all("div", class_="pagingfirstline")
                if len(class_div) > 0:
                    pagingfirstline = class_div[0].find_all("a")
                    if len(pagingfirstline) > 1:
                        # The last link of the paging line points to the last
                        # archive page, e.g. ".../archives/p100-10.html":
                        # extract its page offset to count the archive pages.
                        lastpage = pagingfirstline[-1].get("href", "/")
                        element_lastpage = lastpage.split("/")[-1]
                        number_page = element_lastpage.split("-")[0].split("p")[1]
                        number_lastpage = int(number_page) / 10

                        # Split the archive pages evenly between threads.
                        setPageDivided = int(number_lastpage) / max_thread
                        setPagePart = setPageDivided * (index_thread + 1)
                        firstPagePart = setPagePart - setPageDivided
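                        # Worked example with illustrative numbers: for 100
                        # archive pages and max_thread = 4, setPageDivided is
                        # 25.0, so thread 0 walks pages [0, 25), thread 1
                        # walks [25, 50), and so on.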
                        self._logger.debug("{0} : Total pages : {1}".format(self._name, int(number_lastpage)))
                        self._logger.debug("{0} : First range : {1}".format(self._name, int(firstPagePart)))
                        self._logger.debug("{0} : Last range : {1}".format(self._name, int(setPagePart)))
                        for j in range(int(firstPagePart), int(setPagePart)):
                            paging = j * 10
                            categorie = urlparse(i).path.split("/")
                            url_paging = "{0}/archives/p{1}-10.html".format(self._url, paging)
                            if len(categorie) > 2:
                                url_paging = "{0}/archives/{1}/p{2}-10.html".format(self._url, categorie[2], paging)
                            self._logger.info("{0} : {1}".format(self._name, url_paging))
                            if url_paging not in webpage[section]["page"]:
                                webpage[section]["page"].append(url_paging)
                            page = self._request.get(url_paging)
                            if page.status_code == 200:
                                soup = BeautifulSoup(page.text, self._parser)
                                h2 = soup.find_all("h2")
                                for title in h2:
                                    href = title.find_all("a")[0].get("href", "/")
                                    if href not in webpage[section]["article"]:
                                        try:
                                            o = urlparse(href)
                                            o = o._replace(scheme="https").geturl()
                                        except Exception as err:
                                            self._logger.error("{0} : Parsing error : {1}".format(self._name, err))
                                            exit(1)
                                        webpage[section]["article"].append(o)
                            else:
                                self._logger.error("{0} : Page not retrieved due to status code : {1}".format(self._name, page.status_code))
                                self._logger.debug("{0} : {1}".format(self._name, page.content))
        return webpage

    # Private methods

    # Create the directory path, one component at a time (same effect as
    # os.makedirs(path_dir, exist_ok=True), with per-directory logging)
    def _mkdirPath(self, path_dir):
        if not os.path.exists(path_dir):
            makedir = []
            pathh = path_dir.split("/")
            for i in pathh:
                makedir.append(i)
                repath = "/".join(makedir)
                if not os.path.exists(repath):
                    self._logger.debug("{0} : Directory created : {1}".format(self._name, repath))
                    try:
                        if len(repath) > 0:
                            os.mkdir(repath)
                    except Exception as err:
                        self._logger.error("Directory error : {0}".format(err))
                        self._logger.debug("Directory error : {0} {1} {2} {3} {4}".format(err, path_dir, repath, pathh, makedir))
                        exit(1)

    # Get CSS and JS URLs referenced by the site's main page
    def _getScriptCss(self, js, css):
        try:
            page = self._request.get(self._url)
        except Exception as err:
            self._logger.error("Connection error : {0}".format(err))
            exit(1)
        page_url = []
        if page.status_code == 200:
            soup = BeautifulSoup(page.text, self._parser)
            if js is True:
                script = soup.find_all("script")
                for anchor in script:
                    src = anchor.get("src", "/")
                    if src != "/":
                        try:
                            u = urlparse(self._url)
                            o = urlparse(src)
                        except Exception as err:
                            self._logger.error("Parsing error : {0}".format(err))
                            exit(1)
                        # Complete relative URLs with the scheme and host
                        # of the exported site.
                        if o.netloc == "":
                            o = o._replace(netloc=u.netloc)
                            o = o._replace(scheme=u.scheme)
                        page_url.append(o.geturl())
            if css is True:
                link = soup.find_all("link")
                for anchor in link:
                    rel = anchor.get("rel")
                    # A <link> tag may carry no rel attribute at all: guard
                    # before indexing, then keep stylesheets only.
                    if rel is not None and rel[0] == "stylesheet":
                        href = anchor.get("href", "/")
                        if href != "/":
                            try:
                                u = urlparse(self._url)
                                o = urlparse(href)
                            except Exception as err:
                                self._logger.error("Parsing error : {0}".format(err))
                                exit(1)
                            if o.netloc == "":
                                o = o._replace(netloc=u.netloc)
                                o = o._replace(scheme=u.scheme)
                            page_url.append(o.geturl())
        else:
            self._logger.error("JS or CSS not retrieved due to status code : {0}".format(page.status_code))
            self._logger.debug(page.content)
        return page_url

    # Get image URLs from a list of pages
    def _getImg(self, webpage):
        page_img = []
        for i in webpage:
            try:
                page = self._request.get(i)
            except Exception as err:
                self._logger.error("{0} : Connection error : {1}".format(self._name, err))
                exit(1)
            if page.status_code == 200:
                soup = BeautifulSoup(page.text, self._parser)
                img = soup.find_all("img")
                self._logger.info("{0} : images from page : {1}".format(self._name, i))
                for anchor in img:
                    src = anchor.get("src", "/")
                    if src != "/":
                        if src not in page_img:
                            self._logger.info("{0} : image : {1}".format(self._name, src))
                            page_img.append(src)
            else:
                self._logger.error("{0} : Image not retrieved due to status code : {1}".format(self._name, page.status_code))
                self._logger.debug("{0} : {1}".format(self._name, page.content))
        return page_img

    # Download each page of the list into backup_dir, mirroring the URL's
    # host and path on disk
    def _downloadPage(self, webpage, backup_dir):
        for i in range(0, len(webpage)):
            try:
                o = urlparse(webpage[i])
            except Exception as err:
                self._logger.error("Parsing error : {0}".format(err))
                exit(1)
            path_web = o.path.split("/")
            filePageWeb = path_web[-1]
            path_web.pop()
            dir_page_web = "/".join(path_web)
            self._mkdirPath("{0}/{1}/{2}".format(backup_dir, o.netloc, dir_page_web))
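            # For example (illustrative URL), the page
            # "https://www.example.com/archives/p10-10.html" lands in
            # "backup/www.example.com/archives/p10-10.html".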
            try:
                r = self._request.get(webpage[i])
            except Exception as err:
                self._logger.error("Connection error : {0}".format(err))
                exit(1)
            if r.status_code == 200:
                # URLs with an empty path fall back to index.html at the
                # host root.
                fileDownload = "{0}/{1}/index.html".format(backup_dir, o.netloc)
                if len(dir_page_web) > 0 and len(filePageWeb) > 0:
                    fileDownload = "{0}/{1}{2}/{3}".format(backup_dir, o.netloc, dir_page_web, filePageWeb)
                self._logger.info("{0} : {1}/{2} : {3}".format(self._name, i + 1, len(webpage), fileDownload))
                try:
                    # Use a context manager so the file handle is closed
                    # deterministically.
                    with open(fileDownload, "wb") as f:
                        f.write(r.content)
                except Exception as err:
                    self._logger.error("File error : {0}".format(err))
                    exit(1)
            else:
                self._logger.error("Not downloaded due to status code : {0}".format(r.status_code))
                self._logger.debug(r.content)
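

# Minimal usage sketch (illustrative only: the URL, thread split and logger
# configuration below are assumptions, not part of this module):
#
#   import logging
#   logging.basicConfig(level=logging.INFO)
#   export = WPExport(url="https://www.example.com",
#                     logger=logging.getLogger("wp_export"),
#                     directory="backup")
#   webpage = export.getUrlPage(index_thread=0, max_thread=1)
#   export.downloadHTML(webpage["principal"]["page"])
#   export.downloadCss()
#   export.downloadJs()
#   export.downloadImg(webpage["principal"]["article"])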