Merge pull request 'web_scap' (#1) from web_scap into master
Reviewed-on: #1
This commit is contained in:
commit e809e376e5

web_scrap.py (115 lines changed)
@@ -1,12 +1,109 @@
 #!/usr/bin/python3
 
-# Python 3
-# Extract the links from a web page
 from bs4 import BeautifulSoup
-import urllib.request
+from urllib.parse import urlparse
+import requests, os, argparse, logging
 
-with urllib.request.urlopen('https://www.clarissariviere.com/') as response:
-    webpage = response.read()
-    soup = BeautifulSoup(webpage, 'html.parser')
-    for anchor in soup.find_all('a'):
-        print(anchor.get('href', '/'))
+def mkdir_path(path_dir, logger):
+    # Create every missing level of path_dir, one path segment at a time.
+    if not os.path.exists(path_dir):
+        makedir = []
+        pathh = path_dir.split("/")
+        for i in pathh:
+            makedir.append(i)
+            repath = "/".join(makedir)
+            if not os.path.exists(repath):
+                logger.debug("Directory created: {0}".format(repath))
+                os.mkdir(repath)
+
+
+def getUrlPage(url, logger):
+    # Collect the links of the site menu, then walk each section and its
+    # archive pages to build the full list of article URLs.
+    page = requests.get(url)
+    page_url = []
+    if page.status_code == 200:
+        soup = BeautifulSoup(page.text, 'html.parser')
+        ul = soup.find_all("ul", id="listsmooth")
+        for anchor in ul[0].find_all("a"):
+            href = anchor.get('href', '/')
+            if href != "#":
+                page_url.append(href)
+
+    webpage = []
+    for i in page_url:
+        page = requests.get(i)
+        if page.status_code == 200:
+            logger.info("page : {0}".format(i))
+            if i not in webpage:
+                webpage.append(i)
+            soup = BeautifulSoup(page.text, 'html.parser')
+            class_div = soup.find_all("div", class_="pagingfirstline")
+            if len(class_div) > 0:
+                pagingfirstline = class_div[0].find_all("a")
+                if len(pagingfirstline) > 1:
+                    # The last link of the paging block points to the last
+                    # archive page ("pNN-10.html"); NN / 10 gives the number
+                    # of paging steps.
+                    lastpage = pagingfirstline[len(pagingfirstline)-1].get("href", "/")
+                    element_lastpage = lastpage.split("/")[len(lastpage.split("/"))-1]
+                    number_page = element_lastpage.split("-")[0].split("p")[1]
+                    number_lastpage = int(number_page) / 10
+                    for j in range(1, int(number_lastpage)):
+                        paging = j * 10
+                        categorie = urlparse(i).path.split("/")
+                        url_paging = "{0}/archives/p{1}-10.html".format(url, paging)
+                        if len(categorie) > 2:
+                            url_paging = "{0}/archives/{1}/p{2}-10.html".format(url, categorie[2], paging)
+                        logger.info(url_paging)
+                        if url_paging not in webpage:
+                            webpage.append(url_paging)
+                            page = requests.get(url_paging)
+                            if page.status_code == 200:
+                                soup = BeautifulSoup(page.text, 'html.parser')
+                                h2 = soup.find_all("h2")
+                                for title in h2:
+                                    href = title.find_all("a")[0].get("href", "/")
+                                    if href not in webpage:
+                                        o = urlparse(href)
+                                        o = o._replace(scheme="https").geturl()
+                                        webpage.append(o)
+    return webpage
+
+
+def downloadPage(url, backup_dir, logger):
+    # Force https on the start URL, collect every article URL and save each
+    # page under backup_dir, mirroring its path on the site.
+    o = urlparse(url)
+    o = o._replace(scheme="https")
+    webpage = getUrlPage(o.geturl().replace(":///", "://"), logger)
+    for i in range(0, len(webpage)):
+        o = urlparse(webpage[i])
+        path_web = o.path.split("/")
+        filePageWeb = path_web[len(path_web)-1]
+        path_web.pop(len(path_web)-1)
+        dir_page_web = "/".join(path_web)
+        mkdir_path("{0}/{1}".format(backup_dir, dir_page_web), logger)
+        r = requests.get(webpage[i])
+        if r.status_code == 200:
+            fileDownload = "{0}/index.html".format(backup_dir)
+            if len(dir_page_web) > 0 and len(filePageWeb) > 0:
+                fileDownload = "{0}/{1}/{2}".format(backup_dir, dir_page_web, filePageWeb)
+            logger.info("{0}/{1} : {2}".format(i, len(webpage), fileDownload))
+            with open(fileDownload, "wb") as f:
+                f.write(r.content)
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--url", help="canalblog URL to scrape", required=True)
+    parser.add_argument("--dir",
+                        default="backup",
+                        help="backup file path")
+    parser.add_argument("--debug", help="Verbosity", action="store_true")
+    args = parser.parse_args()
+    logger = logging.getLogger('web_scrap')
+    ch = logging.StreamHandler()
+
+    # --debug is a store_true flag, so args.debug is always a boolean;
+    # test its value directly instead of comparing it to None.
+    if args.debug:
+        logger.setLevel(logging.DEBUG)
+        ch.setLevel(logging.DEBUG)
+    else:
+        logger.setLevel(logging.INFO)
+        ch.setLevel(logging.INFO)
+
+    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+    ch.setFormatter(formatter)
+    logger.addHandler(ch)
+
+    downloadPage(args.url, args.dir, logger)
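
A possible invocation of the merged script, assuming it is saved as web_scrap.py and pointed at the site that the previous version had hard-coded (the --dir and --debug flags are optional):

    python3 web_scrap.py --url https://www.clarissariviere.com --dir backup --debug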
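
For comparison, the incremental directory creation done by mkdir_path can also be expressed with a single standard-library call; this is only a sketch of an equivalent helper, not part of the commit:

    import os

    def mkdir_path(path_dir, logger):
        # os.makedirs creates every missing intermediate directory in one call,
        # and exist_ok=True keeps it from raising when the path already exists.
        if not os.path.exists(path_dir):
            logger.debug("Directory created: {0}".format(path_dir))
        os.makedirs(path_dir, exist_ok=True)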
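
To illustrate the paging arithmetic in getUrlPage: the last link of the "pagingfirstline" block is parsed for its page number, the number is divided by 10, and one archive URL is rebuilt per step of 10. A minimal sketch with an assumed sample href and base URL (not values taken from the commit):

    # Assumed sample values for illustration only.
    lastpage = "https://www.clarissariviere.com/archives/p40-10.html"
    url = "https://www.clarissariviere.com"

    element_lastpage = lastpage.split("/")[-1]                   # "p40-10.html"
    number_page = element_lastpage.split("-")[0].split("p")[1]   # "40"
    number_lastpage = int(number_page) / 10                      # 4.0

    for j in range(1, int(number_lastpage)):
        paging = j * 10
        print("{0}/archives/p{1}-10.html".format(url, paging))
    # Prints the URLs for p10-10.html, p20-10.html and p30-10.html.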