Download file html

Valentin CZERYBA 2023-03-05 21:44:30 +01:00
parent a3aceccba7
commit c7dc2d626f


@@ -1,13 +1,7 @@
 #!/usr/bin/python3
-# Python 3
-# Extraction des liens d'une page web
-
 from bs4 import BeautifulSoup
 from urllib.parse import urlparse
-import requests, os
+import requests, os, argparse
 
-BACKUP_DIR = "backup"
-URL = "www.clarissariviere.com"
-
 def mkdir_path(path_dir):
     if not os.path.exists(path_dir):
@@ -20,10 +14,7 @@ def mkdir_path(path_dir):
                 os.mkdir(repath)
 
 def getUrlPage(url):
-    print(url)
     page = requests.get(url)
     page_url = []
     if page.status_code == 200:
@@ -69,21 +60,35 @@ def getUrlPage(url):
                 o = urlparse(href)
                 o = o._replace(scheme="https").geturl()
                 webpage.append(o)
     return webpage
 
-def downloadPage(url):
+def downloadPage(url, backup_dir):
     o = urlparse(url)
     o = o._replace(scheme="https")
+    o = o._replace(fragment="")
     webpage = getUrlPage(o.geturl().replace(":///", "://"))
-    for i in webpage:
-        o = urlparse(i)
+    for i in range(0, len(webpage)):
+        o = urlparse(webpage[i])
         path_web = o.path.split("/")
+        filePageWeb = path_web[len(path_web)-1]
         path_web.pop(len(path_web)-1)
         dir_page_web = "/".join(path_web)
-        mkdir_path("{0}/{1}".format(BACKUP_DIR, dir_page_web))
+        mkdir_path("{0}/{1}".format(backup_dir, dir_page_web))
+        r = requests.get(webpage[i])
+        if r.status_code == 200:
+            fileDownload = "{0}/index.html".format(backup_dir)
+            if len(dir_page_web) > 0 and len(filePageWeb) > 0:
+                fileDownload = "{0}/{1}/{2}".format(backup_dir, dir_page_web, filePageWeb)
+            print("{0}/{1} : {2}".format(i, len(webpage), fileDownload))
+            open(fileDownload, "wb").write(r.content)
 
 if __name__ == '__main__':
-    downloadPage(URL)
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--url", help="canblog URL to be scraping", required=True)
+    parser.add_argument("--dir",
+                        default="backup",
+                        help="backup file path")
+    parser.add_argument("--verbosity", help="Verbosity", action="store_false")
+    args = parser.parse_args()
+    downloadPage(args.url, args.dir)
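
The rewritten download loop maps each collected URL onto a local file path: the last path segment becomes the file name, the segments before it become subdirectories under the backup directory, and index.html is the fallback when either piece is empty. A minimal sketch of that mapping (the helper local_path is hypothetical, not part of the commit):

from urllib.parse import urlparse

def local_path(url, backup_dir):
    # Split the URL path: the last segment is the file name,
    # the segments before it become directories under backup_dir.
    parts = urlparse(url).path.strip("/").split("/")
    file_name = parts.pop()  # split() always yields at least one element
    dir_part = "/".join(parts)
    if dir_part and file_name:
        return "{0}/{1}/{2}".format(backup_dir, dir_part, file_name)
    # Fallback used by the commit when either piece is empty.
    return "{0}/index.html".format(backup_dir)

print(local_path("https://www.clarissariviere.com/archives/2012/index.html", "backup"))
# -> backup/archives/2012/index.html

With the BACKUP_DIR and URL constants gone, both values now come from the command line, e.g. --url www.clarissariviere.com --dir backup, with --dir defaulting to "backup".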