rm web_scrap + add set url + add backup1 to gitignore

parent ed78f22f2e
commit 4ddc4a7cd3

.gitignore (vendored)
@@ -1,2 +1,3 @@
 backup/
+backup1/
 web_scrap.log
@@ -13,6 +13,11 @@ class WPExport:
 
     # Public method
 
+    # Set URL
+
+    def setUrl(self, url):
+        self._url = url
+
     # Download JS
 
     def downloadJs(self):
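For context, the new setter lets a caller re-point an existing WPExport instance at another blog between runs. A minimal usage sketch; WPExport's constructor is not shown in this diff, so the bare call below is an assumption:

    # Hypothetical usage of the added setter; only setUrl and
    # downloadJs appear in this diff, the rest is assumed.
    export = WPExport()
    export.setUrl("https://example.canalblog.com")  # stored in self._url
    export.downloadJs()  # subsequent calls operate on the new URL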
@@ -14,6 +14,9 @@ class WPimport:
 
     # Public method
 
+    def setUrl(self, wordpress):
+        self._wordpress = wordpress
+
     def fromUrl(self, webpage):
         for i in range(0, len(webpage)):
             r = requests.get(webpage[i])
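Similarly, WPimport gains a setter for the target WordPress host. A sketch of how the pair might be driven; the constructor and the page list are assumptions, only setUrl and fromUrl appear in this diff:

    # Hypothetical usage of the added setter.
    wp = WPimport()
    wp.setUrl("wordpress.example.org")  # stored in self._wordpress
    wp.fromUrl(["https://example.canalblog.com/archives/p10-10.html"])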
web_scrap.py (241 deletions)
@@ -1,241 +0,0 @@
-#!/usr/bin/python3
-from bs4 import BeautifulSoup
-from urllib.parse import urlparse
-import requests, os, argparse, logging
-
-def mkdirPath(path_dir, logger):
-    if not os.path.exists(path_dir):
-        makedir = []
-        pathh = path_dir.split("/")
-        for i in pathh:
-            makedir.append(i)
-            repath = "/".join(makedir)
-            if not os.path.exists(repath):
-                logger.debug("Dossier crée : {0}".format(repath))
-                try:
-                    if len(repath) > 0:
-                        os.mkdir(repath)
-                except Exception as err:
-                    logger.error("Directory error : {0}".format(err))
-                    logger.debug("Directory error : {0} {1} {2} {3} {4}".format(err, path_dir, repath, pathh, makedir))
-                    exit(1)
-
-
-def getScriptCss(url, js, css, logger):
-    try:
-        page = requests.get(url)
-    except Exception as err:
-        logger.error("Connection error : {0}".format(err))
-        exit(1)
-    page_url = []
-    if page.status_code == 200:
-        soup = BeautifulSoup(page.text, 'html.parser')
-        if js is True:
-            script = soup.find_all("script")
-            for anchor in script:
-                src = anchor.get("src", "/")
-                if src != "/":
-                    try:
-                        u = urlparse(url)
-                        o = urlparse(src)
-                    except Exception as err:
-                        logger.error("parsing error : {0}".format(err))
-                        exit(1)
-                    if o.netloc == "":
-                        o = o._replace(netloc=u.netloc)
-                        o = o._replace(scheme=u.scheme)
-                    page_url.append(o.geturl())
-        if css is True:
-            link = soup.find_all("link")
-            for anchor in link:
-                rel = anchor.get("rel")
-                if rel[0] == "stylesheet":
-                    href = anchor.get("href", "/")
-                    if href != "/":
-                        try:
-                            u = urlparse(url)
-                            o = urlparse(href)
-                        except Exception as err:
-                            logger.error("parsing error : {0}".format(err))
-                            exit(1)
-                        if o.netloc == "":
-                            o = o._replace(netloc=u.netloc)
-                            o = o._replace(scheme=u.scheme)
-                        page_url.append(o.geturl())
-
-
-    return page_url
-
-def getImg(webpage, logger):
-    page_img = []
-    for i in webpage:
-        try:
-            page = requests.get(i)
-        except Exception as err:
-            logger.error("Connection error : {0}".format(err))
-            exit(1)
-        if page.status_code == 200:
-            soup = BeautifulSoup(page.text, 'html.parser')
-            img = soup.find_all("img")
-            logger.info("image from page: {0} : ".format(i))
-            for anchor in img:
-                src = anchor.get("src", "/")
-                if src != "/":
-                    if src not in page_img:
-                        logger.info("image: {0} : ".format(src))
-                        page_img.append(src)
-
-
-    return page_img
-
-def getUrlPage(url, logger):
-    try:
-        page = requests.get(url)
-    except Exception as err:
-        logger.error("Connection error : {0}".format(err))
-        exit(1)
-    page_url = []
-    if page.status_code == 200:
-        soup = BeautifulSoup(page.text, 'html.parser')
-        ul = soup.find_all("ul", id="listsmooth")
-        for anchor in ul[0].find_all("a"):
-            href = anchor.get('href', '/')
-            if href != "#":
-                page_url.append(href)
-
-    webpage = []
-    for i in page_url:
-        try:
-            page = requests.get(i)
-        except Exception as err:
-            logger.error("Connection error : {0}".format(err))
-            exit(1)
-        if page.status_code == 200:
-            logger.info("page : {0}".format(i))
-            if i not in webpage:
-                webpage.append(i)
-            soup = BeautifulSoup(page.text, 'html.parser')
-            class_div = pagingfirstline = soup.find_all("div", class_="pagingfirstline")
-            if len(class_div) > 0:
-                pagingfirstline = class_div[0].find_all("a")
-                if len(pagingfirstline) > 1:
-                    lastpage = pagingfirstline[len(pagingfirstline)-1].get("href", "/")
-                    element_lastpage = lastpage.split("/")[len(lastpage.split("/"))-1]
-                    number_page = element_lastpage.split("-")[0].split("p")[1]
-                    number_lastpage = int(number_page) / 10
-                    for j in range(1, int(number_lastpage)):
-                        paging = j * 10
-                        categorie = urlparse(i).path.split("/")
-                        url_paging = "{0}/archives/p{1}-10.html".format(url, paging)
-                        if len(categorie) > 2:
-                            url_paging = "{0}/archives/{1}/p{2}-10.html".format(url, categorie[2], paging)
-                        logger.info(url_paging)
-                        if url_paging not in webpage:
-                            webpage.append(url_paging)
-                            page = requests.get(url_paging)
-                            if page.status_code == 200:
-                                soup = BeautifulSoup(page.text, 'html.parser')
-                                h2 = soup.find_all("h2")
-                                for title in h2:
-                                    href = title.find_all("a")[0].get("href", "/")
-                                    if href not in webpage:
-                                        try:
-                                            o = urlparse(href)
-                                            o = o._replace(scheme="https").geturl()
-                                        except Exception as err:
-                                            logger.error("parsing error : {0}".format(err))
-                                            exit(1)
-                                        webpage.append(o)
-    return webpage
-
-
-def downloadPage(webpage, backup_dir, logger):
-
-    for i in range(0, len(webpage)):
-        try:
-            o = urlparse(webpage[i])
-        except Exception as err:
-            logger.error("parsing error : {0}".format(err))
-            exit(1)
-        path_web = o.path.split("/")
-        filePageWeb = path_web[len(path_web)-1]
-        path_web.pop(len(path_web)-1)
-        dir_page_web = "/".join(path_web)
-        mkdirPath("{0}/{1}/{2}".format(backup_dir, o.netloc, dir_page_web), logger)
-        try:
-            r = requests.get(webpage[i])
-        except Exception as err:
-            logger.error("Connection error : {0}".format(err))
-            exit(1)
-        if r.status_code == 200:
-            fileDownload = "{0}/{1}/index.html".format(backup_dir, o.netloc)
-            if len(dir_page_web) > 0 and len(filePageWeb) > 0:
-                fileDownload = "{0}/{1}{2}/{3}".format(backup_dir, o.netloc, dir_page_web, filePageWeb)
-            logger.info("{0}/{1} : {2}".format(i+1, len(webpage), fileDownload))
-            try:
-                open(fileDownload, "wb").write(r.content)
-            except Exception as err:
-                logger.error("file error : {0}".format(err))
-                exit(1)
-
-
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser()
-    parser.add_argument("--url", help="canblog URL to be scraping", required=True)
-    parser.add_argument("--dir",
-                        default="backup",
-                        help="backup file path")
-    parser.add_argument("--debug", help="Verbosity", action="store_true")
-    parser.add_argument("--logfile", help="Log file", default="")
-    parser.add_argument("--no-css", help="No CSS", dest="css", action="store_true")
-    parser.add_argument("--no-js", help="No JS", dest="js", action="store_true")
-    parser.add_argument("--no-img", help="No img", dest="img", action="store_true")
-    parser.add_argument("--no-html", help="No HTML", dest="html", action="store_true")
-    parser.add_argument("--quiet", help="No console output", action="store_true")
-    args = parser.parse_args()
-    logger = logging.getLogger('web_scrap')
-    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
-
-    if args.quiet is False:
-        ch = logging.StreamHandler()
-        if args.debug is True:
-            logger.setLevel(logging.DEBUG)
-            ch.setLevel(logging.DEBUG)
-        else:
-            logger.setLevel(logging.INFO)
-            ch.setLevel(logging.INFO)
-        ch.setFormatter(formatter)
-        logger.addHandler(ch)
-
-
-    if len(args.logfile) > 0:
-        fileHandler = logging.FileHandler(args.logfile)
-        if args.debug is True:
-            fileHandler.setLevel(logging.DEBUG)
-        else:
-            fileHandler.setLevel(logging.INFO)
-        fileHandler.setFormatter(formatter)
-        logger.addHandler(fileHandler)
-
-    try:
-        o = urlparse(args.url)
-        o = o._replace(scheme="https")
-        url = o.geturl().replace(":///", "://")
-    except Exception as err:
-        logger.error("parsing error : {0}".format(err))
-    if args.js is False:
-        script = getScriptCss(url, True, False, logger)
-        downloadPage(script, "{0}/{1}/{2}".format(args.dir, o.path, "dists/js"), logger)
-
-    if args.css is False:
-        css = getScriptCss(url, False, True, logger)
-        downloadPage(css, "{0}/{1}/{2}".format(args.dir, o.path, "dists/css"), logger)
-
-    if args.html is False or args.img is False:
-        webpage = getUrlPage(url, logger)
-        if args.html is False:
-            downloadPage(webpage, args.dir, logger)
-
-        if args.img is False:
-            page_src = getImg(webpage, logger)
-            downloadPage(page_src, "{0}/{1}/{2}".format(args.dir, o.path, "img"), logger)
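The deleted script leaned on one recurring idiom when collecting script, link, and img references: absolutizing a relative src/href by borrowing the scheme and host of the page it was found on, via urlparse and _replace. A minimal standalone sketch of that idiom, assuming the same semantics as the deleted getScriptCss:

    from urllib.parse import urlparse

    def absolutize(page_url, link):
        # When the link has no host of its own, take scheme and
        # netloc from the containing page, as getScriptCss did.
        u = urlparse(page_url)
        o = urlparse(link)
        if o.netloc == "":
            o = o._replace(netloc=u.netloc, scheme=u.scheme)
        return o.geturl()

    # absolutize("https://example.canalblog.com/index.html", "/dists/js/app.js")
    # -> "https://example.canalblog.com/dists/js/app.js"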