diff --git a/WPExport.py b/WPExport.py
new file mode 100644
index 0000000..87571d7
--- /dev/null
+++ b/WPExport.py
@@ -0,0 +1,242 @@
+#!/usr/bin/python3
+from bs4 import BeautifulSoup
+from urllib.parse import urlparse
+import requests, os, argparse, logging
+
+
+class WPExport:
+    def __init__(self, url, logger):
+        self._url = url
+        self._logger = logger
+
+    # Create every missing directory of the given path, one level at a time.
+    def _mkdirPath(self, path_dir):
+        if not os.path.exists(path_dir):
+            makedir = []
+            pathh = path_dir.split("/")
+            for i in pathh:
+                makedir.append(i)
+                repath = "/".join(makedir)
+                if not os.path.exists(repath):
+                    self._logger.debug("Directory created : {0}".format(repath))
+                    try:
+                        if len(repath) > 0:
+                            os.mkdir(repath)
+                    except Exception as err:
+                        self._logger.error("Directory error : {0}".format(err))
+                        self._logger.debug("Directory error : {0} {1} {2} {3} {4}".format(err, path_dir, repath, pathh, makedir))
+                        exit(1)
+
+    # Collect the absolute URLs of the <script> and/or stylesheet <link> tags of the home page.
+    def _getScriptCss(self, js, css):
+        try:
+            page = requests.get(self._url)
+        except Exception as err:
+            self._logger.error("Connection error : {0}".format(err))
+            exit(1)
+        page_url = []
+        if page.status_code == 200:
+            soup = BeautifulSoup(page.text, 'html.parser')
+            if js is True:
+                script = soup.find_all("script")
+                for anchor in script:
+                    src = anchor.get("src", "/")
+                    if src != "/":
+                        try:
+                            u = urlparse(self._url)
+                            o = urlparse(src)
+                        except Exception as err:
+                            self._logger.error("parsing error : {0}".format(err))
+                            exit(1)
+                        if o.netloc == "":
+                            o = o._replace(netloc=u.netloc)
+                            o = o._replace(scheme=u.scheme)
+                        page_url.append(o.geturl())
+            if css is True:
+                link = soup.find_all("link")
+                for anchor in link:
+                    rel = anchor.get("rel", [""])
+                    if rel[0] == "stylesheet":
+                        href = anchor.get("href", "/")
+                        if href != "/":
+                            try:
+                                u = urlparse(self._url)
+                                o = urlparse(href)
+                            except Exception as err:
+                                self._logger.error("parsing error : {0}".format(err))
+                                exit(1)
+                            if o.netloc == "":
+                                o = o._replace(netloc=u.netloc)
+                                o = o._replace(scheme=u.scheme)
+                            page_url.append(o.geturl())
+        return page_url
+
+    # Collect the URL of every <img> found on the given pages, without duplicates.
+    def _getImg(self, webpage):
+        page_img = []
+        for i in webpage:
+            try:
+                page = requests.get(i)
+            except Exception as err:
+                self._logger.error("Connection error : {0}".format(err))
+                exit(1)
+            if page.status_code == 200:
+                soup = BeautifulSoup(page.text, 'html.parser')
+                img = soup.find_all("img")
+                self._logger.info("image from page: {0} : ".format(i))
+                for anchor in img:
+                    src = anchor.get("src", "/")
+                    if src != "/":
+                        if src not in page_img:
+                            self._logger.info("image: {0} : ".format(src))
+                            page_img.append(src)
+        return page_img
+
+    # Walk the blog menu, then the paginated archive pages, and return the URL of every article.
+    def _getUrlPage(self):
+        try:
+            page = requests.get(self._url)
+        except Exception as err:
+            self._logger.error("Connection error : {0}".format(err))
+            exit(1)
+        page_url = []
+        if page.status_code == 200:
+            soup = BeautifulSoup(page.text, 'html.parser')
+            ul = soup.find_all("ul", id="listsmooth")
+            for anchor in ul[0].find_all("a"):
+                href = anchor.get('href', '/')
+                if href != "#":
+                    page_url.append(href)
+
+        webpage = []
+        for i in page_url:
+            try:
+                page = requests.get(i)
+            except Exception as err:
+                self._logger.error("Connection error : {0}".format(err))
+                exit(1)
+            if page.status_code == 200:
+                self._logger.info("page : {0}".format(i))
+                if i not in webpage:
+                    webpage.append(i)
+                soup = BeautifulSoup(page.text, 'html.parser')
+                class_div = soup.find_all("div", class_="pagingfirstline")
+                if len(class_div) > 0:
+                    pagingfirstline = class_div[0].find_all("a")
+                    if len(pagingfirstline) > 1:
+                        lastpage = pagingfirstline[len(pagingfirstline)-1].get("href", "/")
+                        element_lastpage = lastpage.split("/")[len(lastpage.split("/"))-1]
+                        number_page = element_lastpage.split("-")[0].split("p")[1]
+                        number_lastpage = int(number_page) / 10
+                        for j in range(1, int(number_lastpage)):
+                            paging = j * 10
+                            categorie = urlparse(i).path.split("/")
+                            url_paging = "{0}/archives/p{1}-10.html".format(self._url, paging)
+                            if len(categorie) > 2:
+                                url_paging = "{0}/archives/{1}/p{2}-10.html".format(self._url, categorie[2], paging)
+                            self._logger.info(url_paging)
+                            if url_paging not in webpage:
+                                webpage.append(url_paging)
+                            page = requests.get(url_paging)
+                            if page.status_code == 200:
+                                soup = BeautifulSoup(page.text, 'html.parser')
+                                h2 = soup.find_all("h2")
+                                for title in h2:
+                                    href = title.find_all("a")[0].get("href", "/")
+                                    if href not in webpage:
+                                        try:
+                                            o = urlparse(href)
+                                            o = o._replace(scheme="https").geturl()
+                                        except Exception as err:
+                                            self._logger.error("parsing error : {0}".format(err))
+                                            exit(1)
+                                        webpage.append(o)
+        return webpage
+
+    # Download each given URL below backup_dir, mirroring the host and path on disk.
+    def _downloadPage(self, webpage, backup_dir):
+        for i in range(0, len(webpage)):
+            try:
+                o = urlparse(webpage[i])
+            except Exception as err:
+                self._logger.error("parsing error : {0}".format(err))
+                exit(1)
+            path_web = o.path.split("/")
+            filePageWeb = path_web[len(path_web)-1]
+            path_web.pop(len(path_web)-1)
+            dir_page_web = "/".join(path_web)
+            self._mkdirPath("{0}/{1}/{2}".format(backup_dir, o.netloc, dir_page_web))
+            try:
+                r = requests.get(webpage[i])
+            except Exception as err:
+                self._logger.error("Connection error : {0}".format(err))
+                exit(1)
+            if r.status_code == 200:
+                fileDownload = "{0}/{1}/index.html".format(backup_dir, o.netloc)
+                if len(dir_page_web) > 0 and len(filePageWeb) > 0:
+                    fileDownload = "{0}/{1}{2}/{3}".format(backup_dir, o.netloc, dir_page_web, filePageWeb)
+                self._logger.info("{0}/{1} : {2}".format(i+1, len(webpage), fileDownload))
+                try:
+                    with open(fileDownload, "wb") as f:
+                        f.write(r.content)
+                except Exception as err:
+                    self._logger.error("file error : {0}".format(err))
+                    exit(1)
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--url", help="canalblog URL to scrape", required=True)
+    parser.add_argument("--dir",
+                        default="backup",
+                        help="backup directory path")
+    parser.add_argument("--debug", help="Verbosity", action="store_true")
+    parser.add_argument("--logfile", help="Log file", default="")
+    parser.add_argument("--no-css", help="No CSS", dest="css", action="store_true")
+    parser.add_argument("--no-js", help="No JS", dest="js", action="store_true")
+    parser.add_argument("--no-img", help="No img", dest="img", action="store_true")
+    parser.add_argument("--no-html", help="No HTML", dest="html", action="store_true")
+    parser.add_argument("--quiet", help="No console output", action="store_true")
+    args = parser.parse_args()
+
+    logger = logging.getLogger('web_scrap')
+    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+
+    if args.quiet is False:
+        ch = logging.StreamHandler()
+        if args.debug is True:
+            logger.setLevel(logging.DEBUG)
+            ch.setLevel(logging.DEBUG)
+        else:
+            logger.setLevel(logging.INFO)
+            ch.setLevel(logging.INFO)
+        ch.setFormatter(formatter)
+        logger.addHandler(ch)
+
+    if len(args.logfile) > 0:
+        fileHandler = logging.FileHandler(args.logfile)
+        if args.debug is True:
+            fileHandler.setLevel(logging.DEBUG)
+        else:
+            fileHandler.setLevel(logging.INFO)
+        fileHandler.setFormatter(formatter)
+        logger.addHandler(fileHandler)
+
+    # Force https and normalise the URL before scraping.
+    try:
+        o = urlparse(args.url)
+        o = o._replace(scheme="https")
+        url = o.geturl().replace(":///", "://")
+    except Exception as err:
+        logger.error("parsing error : {0}".format(err))
+        exit(1)
+
+    export_wp = WPExport(url, logger)
+
+    if args.js is False:
+        script = export_wp._getScriptCss(True, False)
+        export_wp._downloadPage(script, "{0}/{1}/{2}".format(args.dir, o.path, "dists/js"))
+
+    if args.css is False:
+        css = export_wp._getScriptCss(False, True)
+        export_wp._downloadPage(css, "{0}/{1}/{2}".format(args.dir, o.path, "dists/css"))
+
+    if args.html is False or args.img is False:
+        webpage = export_wp._getUrlPage()
+        if args.html is False:
+            export_wp._downloadPage(webpage, args.dir)
+
+        if args.img is False:
+            page_src = export_wp._getImg(webpage)
+            export_wp._downloadPage(page_src, "{0}/{1}/{2}".format(args.dir, o.path, "img"))