#!/usr/bin/python3

from bs4 import BeautifulSoup
from urllib.parse import urlparse
import requests, os, argparse, logging

from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry


class WPExport:

    def __init__(self, url, logger, parser, directory):
        self._url = url
        self._logger = logger
        self._parser = parser
        self._dir = directory

        # One shared HTTP session for every request, retrying up to five
        # times on rate limiting and transient server errors.
        self._request = requests.Session()

        retries = Retry(total=5,
                        status_forcelist=[429, 500, 502, 503, 504])

        self._request.mount('http://', HTTPAdapter(max_retries=retries))
        self._request.mount('https://', HTTPAdapter(max_retries=retries))

    # Public method

    # Set URL

    def setUrl(self, url):
        self._url = url
2023-04-09 22:49:44 +02:00
|
|
|
# Download JS
|
|
|
|
|
|
|
|
def downloadJs(self):
|
|
|
|
script = self._getScriptCss(True, False)
|
|
|
|
o = urlparse(self._url)
|
|
|
|
self._downloadPage(script, "{0}/{1}/{2}".format(self._dir, o.path, "dists/js"))
|
|
|
|
|
|
|
|
# Download CSS
|
|
|
|
|
|
|
|
def downloadCss(self):
|
|
|
|
css = self._getScriptCss(False, True)
|
|
|
|
o = urlparse(self._url)
|
2023-04-09 23:49:10 +02:00
|
|
|
self._downloadPage(css, "{0}/{1}/{2}".format(self._dir, o.path, "dists/css"))
|
2023-04-09 22:49:44 +02:00
|
|
|
|
|
|
|
# Download HTML
|
|
|
|
|
|
|
|
def downloadHTML(self, webpage):
|
|
|
|
self._downloadPage(webpage, self._dir)
|
|
|
|
|
|
|
|
# Download Image
|
|
|
|
|
|
|
|
def downloadImg(self, webpage):
|
|
|
|
page_src = self._getImg(webpage)
|
|
|
|
o = urlparse(self._url)
|
|
|
|
self._downloadPage(page_src, "{0}/{1}/{2}".format(self._dir, o.path, "img"))
|
|
|
|
|
|
|
|
|
|
|
|
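
    # getUrlPage below discovers every page to back up: it reads the links of
    # the top menu (the <ul id="listsmooth"> element), visits each of those
    # category pages, derives the archive paging URLs ("/archives/pN-10.html",
    # ten posts per page) from the last link of the "pagingfirstline" block,
    # and finally collects the article links found in the <h2> headings of
    # each paging page. The selectors and the URL pattern are specific to the
    # blog platform this exporter targets.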

    # Get URL

    def getUrlPage(self):
        try:
            page = self._request.get(self._url)
        except Exception as err:
            self._logger.error("Connection error : {0}".format(err))
            exit(1)
        page_url = []
        if page.status_code == 200:
            soup = BeautifulSoup(page.text, self._parser)
            ul = soup.find_all("ul", id="listsmooth")
            for anchor in ul[0].find_all("a"):
                href = anchor.get('href', '/')
                if href != "#":
                    page_url.append(href)

        webpage = []
        for i in page_url:
            try:
                page = self._request.get(i)
            except Exception as err:
                self._logger.error("Connection error : {0}".format(err))
                exit(1)
            if page.status_code == 200:
                self._logger.info("page : {0}".format(i))
                if i not in webpage:
                    webpage.append(i)
                soup = BeautifulSoup(page.text, self._parser)
                class_div = soup.find_all("div", class_="pagingfirstline")
                if len(class_div) > 0:
                    pagingfirstline = class_div[0].find_all("a")
                    if len(pagingfirstline) > 1:
                        lastpage = pagingfirstline[len(pagingfirstline)-1].get("href", "/")
                        element_lastpage = lastpage.split("/")[len(lastpage.split("/"))-1]
                        number_page = element_lastpage.split("-")[0].split("p")[1]
                        number_lastpage = int(number_page) / 10
                        for j in range(1, int(number_lastpage)):
                            paging = j * 10
                            categorie = urlparse(i).path.split("/")
                            url_paging = "{0}/archives/p{1}-10.html".format(self._url, paging)
                            if len(categorie) > 2:
                                url_paging = "{0}/archives/{1}/p{2}-10.html".format(self._url, categorie[2], paging)
                            self._logger.info(url_paging)
                            if url_paging not in webpage:
                                webpage.append(url_paging)
                                page = self._request.get(url_paging)
                                if page.status_code == 200:
                                    soup = BeautifulSoup(page.text, self._parser)
                                    h2 = soup.find_all("h2")
                                    for title in h2:
                                        href = title.find_all("a")[0].get("href", "/")
                                        if href not in webpage:
                                            try:
                                                o = urlparse(href)
                                                o = o._replace(scheme="https").geturl()
                                            except Exception as err:
                                                self._logger.error("parsing error : {0}".format(err))
                                                exit(1)
                                            webpage.append(o)
        return webpage

    # Private method
    #
    # Create path

    def _mkdirPath(self, path_dir):
        if not os.path.exists(path_dir):
            makedir = []
            pathh = path_dir.split("/")
            # Create each component of the path in turn (like os.makedirs),
            # skipping components that already exist.
            for i in pathh:
                makedir.append(i)
                repath = "/".join(makedir)
                if not os.path.exists(repath):
                    self._logger.debug("Directory created : {0}".format(repath))
                    try:
                        if len(repath) > 0:
                            os.mkdir(repath)
                    except Exception as err:
                        self._logger.error("Directory error : {0}".format(err))
                        self._logger.debug("Directory error : {0} {1} {2} {3} {4}".format(err, path_dir, repath, pathh, makedir))
                        exit(1)
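
    # _getScriptCss below collects asset URLs from the landing page: the
    # "src" of every <script> tag when js is True, and the "href" of every
    # <link rel="stylesheet"> when css is True. Relative references are made
    # absolute by borrowing the scheme and host of self._url.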

    # Get Css and JS

    def _getScriptCss(self, js, css):
        try:
            page = self._request.get(self._url)
        except Exception as err:
            self._logger.error("Connection error : {0}".format(err))
            exit(1)
        page_url = []
        if page.status_code == 200:
            soup = BeautifulSoup(page.text, self._parser)
            if js is True:
                script = soup.find_all("script")
                for anchor in script:
                    src = anchor.get("src", "/")
                    if src != "/":
                        try:
                            u = urlparse(self._url)
                            o = urlparse(src)
                        except Exception as err:
                            self._logger.error("parsing error : {0}".format(err))
                            exit(1)
                        if o.netloc == "":
                            o = o._replace(netloc=u.netloc)
                            o = o._replace(scheme=u.scheme)
                        page_url.append(o.geturl())

            if css is True:
                link = soup.find_all("link")
                for anchor in link:
                    rel = anchor.get("rel")
                    if rel and rel[0] == "stylesheet":
                        href = anchor.get("href", "/")
                        if href != "/":
                            try:
                                u = urlparse(self._url)
                                o = urlparse(href)
                            except Exception as err:
                                self._logger.error("parsing error : {0}".format(err))
                                exit(1)
                            if o.netloc == "":
                                o = o._replace(netloc=u.netloc)
                                o = o._replace(scheme=u.scheme)
                            page_url.append(o.geturl())
        return page_url

    # Get image

    def _getImg(self, webpage):
        page_img = []
        for i in webpage:
            try:
                page = self._request.get(i)
            except Exception as err:
                self._logger.error("Connection error : {0}".format(err))
                exit(1)
            if page.status_code == 200:
                soup = BeautifulSoup(page.text, self._parser)
                img = soup.find_all("img")
                self._logger.info("images from page : {0}".format(i))
                for anchor in img:
                    src = anchor.get("src", "/")
                    if src != "/":
                        if src not in page_img:
                            self._logger.info("image : {0}".format(src))
                            page_img.append(src)
        return page_img
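
    # _downloadPage below mirrors each URL under backup_dir: the host becomes
    # the first directory level, the URL path is recreated with _mkdirPath,
    # and the response body is written to the matching file (or to index.html
    # when the URL path carries no file name).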

    # Download page

    def _downloadPage(self, webpage, backup_dir):
        for i in range(0, len(webpage)):
            try:
                o = urlparse(webpage[i])
            except Exception as err:
                self._logger.error("parsing error : {0}".format(err))
                exit(1)
            path_web = o.path.split("/")
            filePageWeb = path_web[len(path_web)-1]
            path_web.pop(len(path_web)-1)
            dir_page_web = "/".join(path_web)
            self._mkdirPath("{0}/{1}/{2}".format(backup_dir, o.netloc, dir_page_web))
            try:
                r = self._request.get(webpage[i])
            except Exception as err:
                self._logger.error("Connection error : {0}".format(err))
                exit(1)
            if r.status_code == 200:
                fileDownload = "{0}/{1}/index.html".format(backup_dir, o.netloc)
                if len(dir_page_web) > 0 and len(filePageWeb) > 0:
                    fileDownload = "{0}/{1}{2}/{3}".format(backup_dir, o.netloc, dir_page_web, filePageWeb)
                self._logger.info("{0}/{1} : {2}".format(i+1, len(webpage), fileDownload))
                try:
                    with open(fileDownload, "wb") as fd:
                        fd.write(r.content)
                except Exception as err:
                    self._logger.error("file error : {0}".format(err))
                    exit(1)
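
# Usage sketch (illustrative only): WPExport expects the blog URL, a
# logging.Logger, a BeautifulSoup parser name and a backup directory. The
# URL, parser and directory below are placeholders, not values from this
# repository.
#
#     logger = logging.getLogger("wp_export")
#     export = WPExport("https://www.example.com", logger, "html.parser", "backup")
#     webpage = export.getUrlPage()
#     export.downloadHTML(webpage)
#     export.downloadImg(webpage)
#     export.downloadCss()
#     export.downloadJs()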