Merge pull request 'insert' (#6) from insert into master

Reviewed-on: #6
v4l3n71n 2023-04-11 21:29:53 +00:00
commit f5e82fe4c4
6 changed files with 689 additions and 241 deletions

.gitignore (new file)
@@ -0,0 +1,4 @@
backup/
backup1/
web_scrap.log
__pycache__/

import_export_canalblog.py (new file)
@@ -0,0 +1,159 @@
#!/usr/bin/python3
from requests.auth import HTTPBasicAuth
from getpass import getpass
from urllib.parse import urlparse
import argparse, logging
from lib.WPImport import WPimport
from lib.WPExport import WPExport
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--debug", help="Verbosity", action="store_true")
parser.add_argument("--logfile", help="Log file", default="")
parser.add_argument("--quiet", help="No console output", action="store_true")
parser.add_argument("--parser", help="Parser content", default="html.parser")
subparsers = parser.add_subparsers(dest="command")
import_parser = subparsers.add_parser("import")
import_parser.add_argument("--user", help="wordpress user", required=True)
import_parser.add_argument("--file", help="HTML file", default="")
import_parser.add_argument("--directory", help="HTML directory", default="")
import_parser.add_argument("--canalblog", help="URL Canalblog", default="")
import_parser.add_argument("--wordpress", help="URL Wordpress", required=True)
import_parser.add_argument("--serial", help="Serial execution", action="store_true")
export_parser = subparsers.add_parser("export")
export_parser.add_argument("--url", help="canblog URL to be scraping", required=True)
export_parser.add_argument("--directory",
default="backup",
help="backup file path")
export_parser.add_argument("--no-css", help="No CSS", dest="css", action="store_true")
export_parser.add_argument("--no-js", help="No JS", dest="js", action="store_true")
export_parser.add_argument("--no-img", help="No img", dest="img", action="store_true")
export_parser.add_argument("--no-html", help="No HTML", dest="html", action="store_true")
args = parser.parse_args()
logger = logging.getLogger('import export canalblog')
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
if args.quiet is False:
ch = logging.StreamHandler()
if args.debug is True:
logger.setLevel(logging.DEBUG)
ch.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
ch.setLevel(logging.INFO)
ch.setFormatter(formatter)
logger.addHandler(ch)
if len(args.logfile) > 0:
fileHandler = logging.FileHandler(args.logfile)
if args.debug is True:
fileHandler.setLevel(logging.DEBUG)
else:
fileHandler.setLevel(logging.INFO)
fileHandler.setFormatter(formatter)
logger.addHandler(fileHandler)
if args.command == "import":
password = getpass()
if len(password) == 0:
logger.error("No password error !!! ")
exit(1)
basic = HTTPBasicAuth(args.user, password)
wordpress = args.wordpress.split(",")
importWp = WPimport(basic, "", logger, args.parser)
if len(args.file) > 0:
for i in wordpress:
importWp.setUrl(i)
importWp.fromFile(args.file.split(","))
exit(0)
if len(args.directory) > 0:
directory = args.directory.split(",")
if args.serial is False:
for i in wordpress:
importWp.setUrl(i)
for j in directory:
importWp.fromDirectory(j)
else:
if len(directory) != len(wordpress):
logger.error("ERREUR : Le nombre de dossier n'est pas equivalent au nombre d'URL wordpress")
exit(1)
for i in range(0, len(wordpress)):
importWp.setUrl(wordpress[i])
importWp.fromDirectory(directory[i])
exit(0)
if len(args.canalblog) > 0:
exportWp = WPExport("", logger, args.parser, args.directory)
canalblog = args.canalblog.split(",")
wordpress = args.wordpress.split(",")
if args.serial is False:
for canal in canalblog:
try:
o = urlparse(canal)
o = o._replace(scheme="https")
url = o.geturl().replace(":///", "://")
except Exception as err:
logger.error("parsing error : {0}".format(err))
exit(1)
exportWp.setUrl(url)
webpage = exportWp.getUrlPage()
for j in wordpress:
importWp.setUrl(j)
importWp.fromUrl(webpage)
else:
if len(canalblog) != len(wordpress):
logger.error("ERREUR : Le nombre de dossier n'est pas equivalent au nombre d'URL wordpress")
exit(1)
for i in range(0, len(canalblog)):
try:
o = urlparse(canalblog[i])
o = o._replace(scheme="https")
url = o.geturl().replace(":///", "://")
except Exception as err:
logger.error("parsing error : {0}".format(err))
exit(1)
exportWp.setUrl(url)
webpage = exportWp.getUrlPage()
importWp.setUrl(wordpress[i])
importWp.fromUrl(webpage)
if args.command == "export":
canalblog = args.url.split(",")
exportWp = WPExport("", logger, args.parser, args.directory)
for canal in canalblog:
try:
o = urlparse(canal)
o = o._replace(scheme="https")
url = o.geturl().replace(":///", "://")
except Exception as err:
logger.error("parsing error : {0}".format(err))
exit(1)
exportWp.setUrl(url)
if args.js is False:
exportWp.downloadJs()
if args.css is False:
exportWp.downloadCss()
if args.html is False or args.img is False:
webpage = exportWp.getUrlPage()
if args.html is False:
exportWp.downloadHTML(webpage)
if args.img is False:
exportWp.downloadImg(webpage)
exit(0)

lib/WPExport.py (new file)
@@ -0,0 +1,226 @@
#!/usr/bin/python3
from bs4 import BeautifulSoup
from urllib.parse import urlparse
import requests, os, argparse, logging
class WPExport:
def __init__(self, url, logger, parser, directory):
self._url = url
self._logger = logger
self._parser = parser
self._dir = directory
# Public method
# Set URL
def setUrl(self, url):
self._url = url
# Download JS
def downloadJs(self):
script = self._getScriptCss(True, False)
o = urlparse(self._url)
self._downloadPage(script, "{0}/{1}/{2}".format(self._dir, o.path, "dists/js"))
# Download CSS
def downloadCss(self):
css = self._getScriptCss(False, True)
o = urlparse(self._url)
self._downloadPage(css, "{0}/{1}/{2}".format(self._dir, o.path, "dists/css"))
# Download HTML
def downloadHTML(self, webpage):
self._downloadPage(webpage, self._dir)
# Download Image
def downloadImg(self, webpage):
page_src = self._getImg(webpage)
o = urlparse(self._url)
self._downloadPage(page_src, "{0}/{1}/{2}".format(self._dir, o.path, "img"))
# Get URL
def getUrlPage(self):
try:
page = requests.get(self._url)
except Exception as err:
self._logger.error("Connection error : {0}".format(err))
exit(1)
page_url = []
if page.status_code == 200:
soup = BeautifulSoup(page.text, self._parser)
ul = soup.find_all("ul", id="listsmooth")
for anchor in ul[0].find_all("a"):
href = anchor.get('href', '/')
if href != "#":
page_url.append(href)
webpage = []
for i in page_url:
try:
page = requests.get(i)
except Exception as err:
self._logger.error("Connection error : {0}".format(err))
exit(1)
if page.status_code == 200:
self._logger.info("page : {0}".format(i))
if i not in webpage:
webpage.append(i)
soup = BeautifulSoup(page.text, self._parser)
class_div = soup.find_all("div", class_="pagingfirstline")
if len(class_div) > 0:
pagingfirstline = class_div[0].find_all("a")
if len(pagingfirstline) > 1:
lastpage = pagingfirstline[len(pagingfirstline)-1].get("href", "/")
element_lastpage = lastpage.split("/")[len(lastpage.split("/"))-1]
number_page = element_lastpage.split("-")[0].split("p")[1]
number_lastpage = int(number_page) / 10
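# e.g. a last-page href ending in "p450-10.html" gives number_page = "450" and
# number_lastpage = 45, so the loop below rebuilds p10-10.html ... p440-10.html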
for j in range(1,int(number_lastpage)):
paging = j * 10
categorie = urlparse(i).path.split("/")
url_paging = "{0}/archives/p{1}-10.html".format(self._url, paging)
if len(categorie) > 2:
url_paging = "{0}/archives/{1}/p{2}-10.html".format(self._url, categorie[2], paging)
self._logger.info(url_paging)
if url_paging not in webpage:
webpage.append(url_paging)
page = requests.get(url_paging)
if page.status_code == 200:
soup = BeautifulSoup(page.text, self._parser)
h2 = soup.find_all("h2")
for title in h2:
href = title.find_all("a")[0].get("href", "/")
if href not in webpage:
try:
o = urlparse(href)
o = o._replace(scheme="https").geturl()
except Exception as err:
self._logger.error("parsing error : {0}".format(err))
exit(1)
webpage.append(o)
return webpage
# Private method
#
# Create path
def _mkdirPath(self, path_dir):
if not os.path.exists(path_dir):
makedir = []
pathh = path_dir.split("/")
for i in pathh:
makedir.append(i)
repath = "/".join(makedir)
if not os.path.exists(repath):
self._logger.debug("Dossier crée : {0}".format(repath))
try:
if len(repath) > 0:
os.mkdir(repath)
except Exception as err:
self._logger.error("Directory error : {0}".format(err))
self._logger.debug("Directory error : {0} {1} {2} {3} {4}".format(err, path_dir, repath, pathh, makedir))
exit(1)
# Get Css and JS
def _getScriptCss(self, js, css):
try:
page = requests.get(self._url)
except Exception as err:
self._logger.error("Connection error : {0}".format(err))
exit(1)
page_url = []
if page.status_code == 200:
soup = BeautifulSoup(page.text, self._parser)
if js is True:
script = soup.find_all("script")
for anchor in script:
src = anchor.get("src", "/")
if src != "/":
try:
u = urlparse(self._url)
o = urlparse(src)
except Exception as err:
self._logger.error("parsing error : {0}".format(err))
exit(1)
if o.netloc == "":
o = o._replace(netloc=u.netloc)
o = o._replace(scheme=u.scheme)
page_url.append(o.geturl())
if css is True:
link = soup.find_all("link")
for anchor in link:
rel = anchor.get("rel")
if rel[0] == "stylesheet":
href = anchor.get("href", "/")
if href != "/":
try:
u = urlparse(self._url)
o = urlparse(href)
except Exception as err:
self._logger.error("parsing error : {0}".format(err))
exit(1)
if o.netloc == "":
o = o._replace(netloc=u.netloc)
o = o._replace(scheme=u.scheme)
page_url.append(o.geturl())
return page_url
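# Minimal illustration of the URL completion above (values are made up):
#   u = urlparse("https://example.canalblog.com/index.html")
#   o = urlparse("/dists/css/styles.css")._replace(netloc=u.netloc, scheme=u.scheme)
#   o.geturl()  ->  "https://example.canalblog.com/dists/css/styles.css"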
# Get image
def _getImg(self, webpage):
page_img = []
for i in webpage:
try:
page = requests.get(i)
except Exception as err:
self._logger.error("Connection error : {0}".format(err))
exit(1)
if page.status_code == 200:
soup = BeautifulSoup(page.text, self._parser)
img = soup.find_all("img")
self._logger.info("image from page: {0} : ".format(i))
for anchor in img:
src = anchor.get("src", "/")
if src != "/":
if src not in page_img:
self._logger.info("image: {0} : ".format(src))
page_img.append(src)
return page_img
# Download page
def _downloadPage(self, webpage, backup_dir):
for i in range(0, len(webpage)):
try:
o = urlparse(webpage[i])
except Exception as err:
self._logger.error("parsing error : {0}".format(err))
exit(1)
path_web = o.path.split("/")
filePageWeb = path_web[len(path_web)-1]
path_web.pop(len(path_web)-1)
dir_page_web = "/".join(path_web)
self._mkdirPath("{0}/{1}/{2}".format(backup_dir, o.netloc, dir_page_web))
try:
r = requests.get(webpage[i])
except Exception as err:
self._logger.error("Connection error : {0}".format(err))
exit(1)
if r.status_code == 200:
fileDownload = "{0}/{1}/index.html".format(backup_dir, o.netloc)
if len(dir_page_web) > 0 and len(filePageWeb) > 0:
fileDownload = "{0}/{1}{2}/{3}".format(backup_dir, o.netloc, dir_page_web, filePageWeb)
self._logger.info("{0}/{1} : {2}".format(i+1, len(webpage), fileDownload))
try:
open(fileDownload, "wb").write(r.content)
except Exception as err:
self._logger.error("file error : {0}".format(err))
exit(1)
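# Hedged usage sketch for this class (URL and paths are illustrative only):
#   exportWp = WPExport("https://example.canalblog.com", logger, "html.parser", "backup")
#   exportWp.downloadHTML(exportWp.getUrlPage())
# Pages are written under <directory>/<netloc>/<page path>, for example
#   backup/example.canalblog.com/archives/p10-10.html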

lib/WPImport.py (new file)
@@ -0,0 +1,300 @@
#!/usr/bin/python3
from bs4 import BeautifulSoup
from urllib.parse import urlparse
import requests, os, logging, re, json
class WPimport:
# Constructor
def __init__(self, basic, wordpress, logger, parser):
self._basic = basic
self._wordpress = wordpress
self._logger = logger
self._parser = parser
# Public method
def setUrl(self, wordpress):
self._wordpress = wordpress
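# Import every URL in the list: pages whose body contains a div.articlebody are
# treated as posts, the others are only used to update the featured media.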
def fromUrl(self, webpage):
for i in range(0, len(webpage)):
r = requests.get(webpage[i])
if r.status_code == 200:
self._logger.info("({0}/{1} : Page en cours d'import : {2}".format(i+1, len(webpage), webpage[i]))
soup = BeautifulSoup(r.content, self._parser)
articlebody = soup.find_all("div", class_="articlebody")
if len(articlebody) > 0:
self._addOrUpdatePost(soup)
else:
self._addOrUpdateFeaturedMedia(soup)
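# Collect the sub-directories of <directory>/archives and import every file they contain.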
def fromDirectory(self, directory):
directory = "{0}/archives".format(directory)
directories = self._getDirectories([], "{0}".format(directory))
files = self._getFiles(directories)
self.fromFile(files)
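# Import local HTML exports; each file goes through the same article /
# featured-media detection as fromUrl.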
def fromFile(self, files):
for file in files:
if os.path.exists(file):
self._logger.info("Fichier en cours de traitement : {0}".format(file))
with open(file, 'r') as f:
content = f.read()
soup = BeautifulSoup(content, self._parser)
articlebody = soup.find_all("div", class_="articlebody")
if len(articlebody) > 0:
self._addOrUpdatePost(soup)
else:
self._addOrUpdateFeaturedMedia(soup)
# Private method
## Get all files
def _getFiles(self, item):
files = []
for i in item:
for j in os.listdir(i):
if os.path.isfile("{0}/{1}".format(i, j)):
files.append("{0}/{1}".format(i, j))
return files
## Get directories
def _getDirectories(self, subdirectory, item):
sub = subdirectory
for i in os.listdir(item):
if os.path.isdir("{0}/{1}".format(item, i)):
sub.append("{0}/{1}".format(item, i))
subdirectory = self._getDirectories(sub, "{0}/{1}".format(item, i))
return subdirectory
## Add or update featured media
def _addOrUpdateFeaturedMedia(self, soup):
item_div = soup.find_all("div", {"data-edittype": "post"})
for i in item_div:
h2 = i.find_all("h2")[0].text
params = {"search":h2, "type":"post"}
page = requests.get("http://{0}/wp-json/wp/v2/search".format(self._wordpress), auth=self._basic, params=params)
if page.status_code == 200:
result = page.json()
if len(result) > 0:
if h2 == result[0]["title"]:
img = i.find_all("img")
if len(img) > 0:
img_src = img[0].get("src")
page = requests.get(img_src)
if page.status_code == 200:
name_img = img_src.replace("_q", "")
name_img = name_img.split("/")[len(name_img.split("/"))-1]
params = {"search": name_img}
page = requests.get("http://{0}/wp-json/wp/v2/media".format(self._wordpress), auth=self._basic, params=params)
if page.status_code == 200:
res = page.json()
if len(res) > 0:
id_media = res[0]["id"]
headers = {'Content-Type': 'application/json', 'Accept':'application/json'}
data = {"featured_media": id_media}
r = requests.post("http://{0}/wp-json/wp/v2/posts/{1}".format(self._wordpress, result[0]["id"]), auth=self._basic, headers=headers, data=json.dumps(data))
if r.status_code == 200:
self._logger.info("Ajout media featured : {0}".format(r.json()["title"]["raw"]))
else:
self._logger.info("Aucun media trouvé pour {0}".format(h2))
## Association image to post
def _linkImgPost(self, title, list_img, post_id):
for i in list_img:
data = {"post": post_id}
r = requests.post("http://{0}/wp-json/wp/v2/media/{1}".format(self._wordpress, i["id"]), auth=self._basic, data=data)
if r.status_code == 200:
self._logger.info("Association d'une image à l'article {0}".format(title))
## Add or update img
def _addOrUpdateMedia(self, href_img, page):
media = {"id":"", "rendered":""}
split_fileimg = href_img.split("/")
img_name = split_fileimg[len(split_fileimg)-1]
params = { "search": img_name}
r = requests.get("http://{0}/wp-json/wp/v2/media".format(self._wordpress), auth=self._basic, params=params)
if r.status_code == 200:
res = r.json()
if len(res) > 0:
params = {"force":1}
r = requests.delete("http://{0}/wp-json/wp/v2/media/{1}".format(self._wordpress, res[0]["id"]), auth=self._basic, params=params)
if r.status_code == 200:
self._logger.info("Image supprimé {0}".format(img_name))
data = page.content
img_type = "image/png"
if img_name.split(".")[-1] in ("jpg", "jpeg"):
img_type = "image/jpeg"
headers={ 'Content-Type': img_type,'Content-Disposition' : 'attachment; filename={0}'.format(img_name)}
r = requests.post("http://{0}/wp-json/wp/v2/media".format(self._wordpress), auth=self._basic, headers=headers, data=data)
if r.status_code == 201:
self._logger.info("Ajout d'image {0}".format(img_name))
res = r.json()
media["id"] = res["id"]
media["rendered"] = res["guid"]["rendered"]
return media
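# Note on the flow above: any existing media with the same file name is deleted
# (force=1) before the binary is re-uploaded with a Content-Disposition header
# carrying the original file name; the returned id/guid pair is reused by the caller.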
## Add or update comment
def _addOrUpdateComment(self, post, comment, title):
params = {"post": post}
block = True
page = requests.get("http://{0}/wp-json/wp/v2/comments".format(self._wordpress), auth=self._basic, params=params)
if page.status_code == 200:
result = page.json()
for i in comment:
comment_exist = False
for j in result:
if i["author"] == j["author_name"] and i["date"] == j["date"]:
comment_exist = True
id_comment = j["id"]
data = {"post": post, "content": i["content"], "date": i["date"], "author_name": i["author"]}
if comment_exist is True:
page = requests.post("http://{0}/wp-json/wp/v2/comments/{1}".format(self._wordpress, id_comment), auth=self._basic, data=data)
if page.status_code == 200:
self._logger.info("Commentaire mise à jour pour {0}".format(title))
else:
page = requests.post("http://{0}/wp-json/wp/v2/comments".format(self._wordpress), auth=self._basic, data=data)
if page.status_code == 201:
self._logger.info("Commentaire ajoute pour {0}".format(title))
## Add or Update post
def _addOrUpdatePost(self, soup):
tags = []
month = {"janvier":"01", "février": "02", "mars": "03", "avril":"04", "mai": "05", "juin": "06", "juillet": "07", "août": "08", "septembre": "09", "octobre": "10", "novembre": "11", "décembre": "12"}
liste = ["categories", "tags"]
elements = {}
element = {}
listelement = {}
for i in liste:
page = requests.get("http://{0}/wp-json/wp/v2/{1}".format(self._wordpress,i))
if page.status_code == 200:
elements[i] = page.json()
element[i] = []
listelement[i] = []
articletitle = soup.find_all("h2", class_="articletitle")
articlebody = soup.find_all("div", class_="articlebody")
articledate = soup.find_all("span", class_="articledate")
articleacreator = soup.find_all("span", class_="articlecreator")
dateheader = soup.find_all("div", class_="dateheader")
itemfooter = soup.find_all("div", class_="itemfooter")
comment = soup.find_all("div", class_="comment_item")
img_a = articlebody[0].find_all("a", {"target": "_blank"})
list_img = []
for i in img_a:
new_img = {}
img = i.find_all("img")
if len(img) > 0:
href_a = i.get("href")
href_img = img[0].get("src")
new_img["old_src"]=href_img
new_img["old_href"]=href_a
page_img = requests.get(href_img)
if page_img.status_code == 404:
href_img = href_a
page_img = requests.get(href_a)
if page_img.status_code == 200:
media=self._addOrUpdateMedia(href_img, page_img)
new_img["id"]=media["id"]
new_img["new_src"]=media["rendered"]
list_img.append(new_img)
if href_img != href_a:
media=self._addOrUpdateMedia(href_a, page_img)
new_img["id"]=media["id"]
new_img["new_src"]=media["rendered"]
list_img.append(new_img)
comment_post = []
for i in comment:
comment_item = i.text.split("\n")
footer = i.find_all("div", class_="itemfooter")
comment_author = footer[0].text.split(",")[0].replace("Posté par ", "")
comment_date = footer[0].find_all("abbr")[0].get("title")
comment_content = "<p>"
for j in range(0, len(comment_item)-2):
if len(comment_item[j]) > 0:
comment_content = comment_content + comment_item[j] + "<br />"
comment_content = comment_content + "</p>"
comment_post.append({"author": comment_author, "date": comment_date, "content": comment_content})
a = itemfooter[0].find_all("a", {"rel": True})
for i in a:
rel = i.get("rel")
if rel[0] == 'tag':
href = i.get("href")
if re.search(r'/tag/', href):
element["tags"].append(i.text)
if re.search(r'/archives/', href):
element["categories"].append(i.text)
for i in liste:
for j in element[i]:
element_exist = False
for k in elements[i]:
if k["name"] == j:
element_exist = True
listelement[i].append(k["id"])
if element_exist is False:
data = {"name": j}
page = requests.post("http://{0}/wp-json/wp/v2/{1}".format(self._wordpress, i), auth=self._basic, data=data)
if page.status_code == 201:
result = page.json()
listelement[i].append(result["id"])
title = articletitle[0].text
author = articleacreator[0].text.lower()
body = articlebody[0].find_all("p")
bodyhtml = "<p>"
for i in body:
if len(i.text) == 1:
bodyhtml = bodyhtml + "<br />"
else:
bodyhtml = bodyhtml + str(i).replace("<p>", "").replace("</p>", "").replace("<br>", "<br />") + "<br />"
bodyhtml = bodyhtml + "</p>"
for i in list_img:
o = urlparse(i["new_src"])
bodyhtml = bodyhtml.replace(i["old_href"], o.path)
bodyhtml = bodyhtml.replace(i["old_src"], o.path)
hour = articledate[0].text
time = dateheader[0].text.split(" ")
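# e.g. a dateheader of "11 avril 2023" with an articledate of "21:29" (illustrative
# values) produces the REST date string "2023-04-11T21:29:00"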
data = {"title":title, "content":bodyhtml, "status":"publish", "date": "{0}-{1}-{2}T{3}:00".format(time[2],month[time[1]],time[0], hour), "tags": listelement["tags"], "categories": listelement["categories"]}
params = {"search":author}
page = requests.get("http://{0}/wp-json/wp/v2/users".format(self._wordpress), auth=self._basic, params=params)
if page.status_code == 200:
result = page.json()
data["author"] = result[0]["id"]
params = {"search":title}
page = requests.get("http://{0}/wp-json/wp/v2/posts".format(self._wordpress), auth=self._basic, params=params)
page_exist = True
headers = {'Content-Type': 'application/json', 'Accept':'application/json'}
if page.status_code == 200:
result = page.json()
if len(result) == 0:
page_exist = False
else:
self._logger.info("La page {0} existe deja et mis à jour".format(title))
post_id = result[0]["id"]
page = requests.post("http://{0}/wp-json/wp/v2/posts/{1}".format(self._wordpress, post_id), auth=self._basic, headers=headers, data=json.dumps(data))
if page.status_code == 200:
result = page.json()
self._logger.info("Article mis à jour : {0}".format(result["title"]["raw"]))
self._addOrUpdateComment(result["id"], comment_post, result["title"]["raw"])
self._linkImgPost(result["title"]["raw"], list_img, result["id"])
if page_exist is False:
page = requests.post("http://{0}/wp-json/wp/v2/posts".format(self._wordpress), auth=self._basic, headers=headers, data=json.dumps(data))
if page.status_code == 201:
result = page.json()
self._logger.info("Article ajoute : {0}".format(result["title"]["raw"]))
self._addOrUpdateComment(result["id"], comment_post, result["title"]["raw"])
self._linkImgPost(result["title"]["raw"], list_img, result["id"])
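# Hedged usage sketch (credentials and hostname are placeholders):
#   from requests.auth import HTTPBasicAuth
#   importWp = WPimport(HTTPBasicAuth("admin", "secret"), "blog.example.org", logger, "html.parser")
#   importWp.fromDirectory("backup/example.canalblog.com")
# Every write goes through the WordPress REST API (/wp-json/wp/v2/posts, media,
# comments, categories, tags, users) with HTTP Basic authentication.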

lib/__init__.py (new, empty file)

(deleted file)
@@ -1,241 +0,0 @@
#!/usr/bin/python3
from bs4 import BeautifulSoup
from urllib.parse import urlparse
import requests, os, argparse, logging
def mkdirPath(path_dir, logger):
if not os.path.exists(path_dir):
makedir = []
pathh = path_dir.split("/")
for i in pathh:
makedir.append(i)
repath = "/".join(makedir)
if not os.path.exists(repath):
logger.debug("Dossier crée : {0}".format(repath))
try:
if len(repath) > 0:
os.mkdir(repath)
except Exception as err:
logger.error("Directory error : {0}".format(err))
logger.debug("Directory error : {0} {1} {2} {3} {4}".format(err, path_dir, repath, pathh, makedir))
exit(1)
def getScriptCss(url, js, css, logger):
try:
page = requests.get(url)
except Exception as err:
logger.error("Connection error : {0}".format(err))
exit(1)
page_url = []
if page.status_code == 200:
soup = BeautifulSoup(page.text, 'html.parser')
if js is True:
script = soup.find_all("script")
for anchor in script:
src = anchor.get("src", "/")
if src != "/":
try:
u = urlparse(url)
o = urlparse(src)
except Exception as err:
logger.error("parsing error : {0}".format(err))
exit(1)
if o.netloc == "":
o = o._replace(netloc=u.netloc)
o = o._replace(scheme=u.scheme)
page_url.append(o.geturl())
if css is True:
link = soup.find_all("link")
for anchor in link:
rel = anchor.get("rel")
if rel[0] == "stylesheet":
href = anchor.get("href", "/")
if href != "/":
try:
u = urlparse(url)
o = urlparse(href)
except Exception as err:
logger.error("parsing error : {0}".format(err))
exit(1)
if o.netloc == "":
o = o._replace(netloc=u.netloc)
o = o._replace(scheme=u.scheme)
page_url.append(o.geturl())
return page_url
def getImg(webpage, logger):
page_img = []
for i in webpage:
try:
page = requests.get(i)
except Exception as err:
logger.error("Connection error : {0}".format(err))
exit(1)
if page.status_code == 200:
soup = BeautifulSoup(page.text, 'html.parser')
img = soup.find_all("img")
logger.info("image from page: {0} : ".format(i))
for anchor in img:
src = anchor.get("src", "/")
if src != "/":
if src not in page_img:
logger.info("image: {0} : ".format(src))
page_img.append(src)
return page_img
def getUrlPage(url, logger):
try:
page = requests.get(url)
except Exception as err:
logger.error("Connection error : {0}".format(err))
exit(1)
page_url = []
if page.status_code == 200:
soup = BeautifulSoup(page.text, 'html.parser')
ul = soup.find_all("ul", id="listsmooth")
for anchor in ul[0].find_all("a"):
href = anchor.get('href', '/')
if href != "#":
page_url.append(href)
webpage = []
for i in page_url:
try:
page = requests.get(i)
except Exception as err:
logger.error("Connection error : {0}".format(err))
exit(1)
if page.status_code == 200:
logger.info("page : {0}".format(i))
if i not in webpage:
webpage.append(i)
soup = BeautifulSoup(page.text, 'html.parser')
class_div = pagingfirstline = soup.find_all("div", class_="pagingfirstline")
if len(class_div) > 0:
pagingfirstline = class_div[0].find_all("a")
if len(pagingfirstline) > 1:
lastpage = pagingfirstline[len(pagingfirstline)-1].get("href", "/")
element_lastpage = lastpage.split("/")[len(lastpage.split("/"))-1]
number_page = element_lastpage.split("-")[0].split("p")[1]
number_lastpage = int(number_page) / 10
for j in range(1,int(number_lastpage)):
paging = j * 10
categorie = urlparse(i).path.split("/")
url_paging = "{0}/archives/p{1}-10.html".format(url, paging)
if len(categorie) > 2:
url_paging = "{0}/archives/{1}/p{2}-10.html".format(url, categorie[2], paging)
logger.info(url_paging)
if url_paging not in webpage:
webpage.append(url_paging)
page = requests.get(url_paging)
if page.status_code == 200:
soup = BeautifulSoup(page.text, 'html.parser')
h2 = soup.find_all("h2")
for title in h2:
href = title.find_all("a")[0].get("href", "/")
if href not in webpage:
try:
o = urlparse(href)
o = o._replace(scheme="https").geturl()
except Exception as err:
logger.error("parsing error : {0}".format(err))
exit(1)
webpage.append(o)
return webpage
def downloadPage(webpage, backup_dir, logger):
for i in range(0, len(webpage)):
try:
o = urlparse(webpage[i])
except Exception as err:
logger.error("parsing error : {0}".format(err))
exit(1)
path_web = o.path.split("/")
filePageWeb = path_web[len(path_web)-1]
path_web.pop(len(path_web)-1)
dir_page_web = "/".join(path_web)
mkdirPath("{0}/{1}/{2}".format(backup_dir, o.netloc, dir_page_web), logger)
try:
r = requests.get(webpage[i])
except Exception as err:
logger.error("Connection error : {0}".format(err))
exit(1)
if r.status_code == 200:
fileDownload = "{0}/{1}/index.html".format(backup_dir, o.netloc)
if len(dir_page_web) > 0 and len(filePageWeb) > 0:
fileDownload = "{0}/{1}{2}/{3}".format(backup_dir, o.netloc, dir_page_web, filePageWeb)
logger.info("{0}/{1} : {2}".format(i+1, len(webpage), fileDownload))
try:
open(fileDownload, "wb").write(r.content)
except Exception as err:
logger.error("file error : {0}".format(err))
exit(1)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--url", help="canblog URL to be scraping", required=True)
parser.add_argument("--dir",
default="backup",
help="backup file path")
parser.add_argument("--debug", help="Verbosity", action="store_true")
parser.add_argument("--logfile", help="Log file", default="")
parser.add_argument("--no-css", help="No CSS", dest="css", action="store_true")
parser.add_argument("--no-js", help="No JS", dest="js", action="store_true")
parser.add_argument("--no-img", help="No img", dest="img", action="store_true")
parser.add_argument("--no-html", help="No HTML", dest="html", action="store_true")
parser.add_argument("--quiet", help="No console output", action="store_true")
args = parser.parse_args()
logger = logging.getLogger('web_scrap')
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
if args.quiet is False:
ch = logging.StreamHandler()
if args.debug is True:
logger.setLevel(logging.DEBUG)
ch.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
ch.setLevel(logging.INFO)
ch.setFormatter(formatter)
logger.addHandler(ch)
if len(args.logfile) > 0:
fileHandler = logging.FileHandler(args.logfile)
if args.debug is True:
fileHandler.setLevel(logging.DEBUG)
else:
fileHandler.setLevel(logging.INFO)
fileHandler.setFormatter(formatter)
logger.addHandler(fileHandler)
try:
o = urlparse(args.url)
o = o._replace(scheme="https")
url = o.geturl().replace(":///", "://")
except Exception as err:
logger.error("parsing error : {0}".format(err))
if args.js is False:
script = getScriptCss(url, True, False, logger)
downloadPage(script, "{0}/{1}/{2}".format(args.dir, o.path, "dists/js"), logger)
if args.css is False:
css = getScriptCss(url, False, True, logger)
downloadPage(css, "{0}/{1}/{2}".format(args.dir, o.path, "dists/css"), logger)
if args.html is False or args.img is False:
webpage = getUrlPage(url, logger)
if args.html is False:
downloadPage(webpage, args.dir, logger)
if args.img is False:
page_src = getImg(webpage, logger)
downloadPage(page_src, "{0}/{1}/{2}".format(args.dir, o.path, "img"), logger)