16 Commits

SHA1 Message Date
4b9a790f8a add debug 2023-03-13 21:35:32 +01:00
a6d642811e add script backup canalblog 2023-03-09 23:06:34 +01:00
dfc9c4277b Merge pull request 'add option quiet' (#5) from quiet into master
Reviewed-on: #5
2023-03-09 21:56:58 +00:00
89ea5720e5 add option quiet 2023-03-09 22:56:24 +01:00
c62e3e6646 Merge pull request 'add logfile' (#4) from logfile into master
Reviewed-on: #4
2023-03-09 20:09:54 +00:00
a04baa4dca add logfile 2023-03-09 21:08:23 +01:00
36bd30bd5a Merge pull request 'exception' (#3) from exception into master
Reviewed-on: #3
2023-03-09 19:36:48 +00:00
699cdc350e add except for parsing 2023-03-09 20:35:47 +01:00
bf5a5b7eb3 add exception for connection and createdir 2023-03-09 20:27:49 +01:00
2b3729a7bc Merge pull request 'image_script' (#2) from image_script into master
Reviewed-on: #2
2023-03-08 21:42:53 +00:00
4d073e0254 fix path with url 2023-03-08 22:41:35 +01:00
77e61ef571 fix path 2023-03-08 22:05:25 +01:00
21d24d638d add argument 2023-03-08 22:01:11 +01:00
896cfa0d52 remove comment useless 2023-03-07 22:53:33 +01:00
06599d99fa download css 2023-03-07 22:50:40 +01:00
1a67ab7dbf download script js 2023-03-07 22:42:05 +01:00
2 changed files with 205 additions and 28 deletions

backup_canalblog.sh (new executable file, 47 additions)

@@ -0,0 +1,47 @@
#!/bin/bash
TAR=/usr/bin/tar
PYTHON=/usr/bin/python3
GZIP=/usr/bin/gzip
SCRIPTDIR=/home/valentin/script
WEBSCRAP=${SCRIPTDIR}/web_scrap.py
URL=www.clarissariviere.com
DATE=$(date +%Y%m%d)
DIRECTORY=/home/valentin/backup
BACKUPDIR=/home/valentin/backup_clarissa
LIST=${BACKUPDIR}/backup.list
fileBackup="backup-clarissa-${DATE}"
LOGFILE=web_scrap.txt
SENDER="valczebackup@gmail.com"

if [ "$(date +%u)" -eq 5 ]; then
    echo > "${LIST}"
    rm -f "${BACKUPDIR}"/*-incr.tar.gz
    fileBackup="${fileBackup}-full"
    subject="Sauvegarde full"
else
    fileBackup="${fileBackup}-incr"
    subject="Sauvegarde incremental"
fi
subject="${subject} ${URL} ${DATE}"

echo > "${BACKUPDIR}/${LOGFILE}"

${PYTHON} ${WEBSCRAP} --url ${URL} --dir ${DIRECTORY} --quiet --logfile "${BACKUPDIR}/${LOGFILE}"
if [ ${?} -ne 0 ]; then
    subject="${subject} echoue : recuperation page"
    echo "${subject}" | mail -s "${subject}" -A "${BACKUPDIR}/${LOGFILE}" ${SENDER}
    exit 1
fi

${TAR} --create --file="${BACKUPDIR}/${fileBackup}.tar" --listed-incremental="${LIST}" ${DIRECTORY}
if [ ${?} -ne 0 ]; then
    subject="${subject} echoue : archivage page"
    echo "${subject}" | mail -s "${subject}" -A "${BACKUPDIR}/${LOGFILE}" ${SENDER}
    exit 1
fi

${GZIP} -f -9 "${BACKUPDIR}/${fileBackup}.tar"
if [ ${?} -ne 0 ]; then
    subject="${subject} echoue : compression archive"
    echo "${subject}" | mail -s "${subject}" -A "${BACKUPDIR}/${LOGFILE}" ${SENDER}
    exit 1
fi

subject="${subject} OK"
echo "${subject}" | mail -s "${subject}" -A "${BACKUPDIR}/${LOGFILE}" ${SENDER}
exit 0
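
The Friday test (`date +%u` returns 5 on Friday) is what switches the script between full and incremental mode, so the script only behaves as intended when run once a day. A minimal crontab sketch; the 03:00 schedule and the script's location under ${SCRIPTDIR} are assumptions, not taken from the repository:

# Assumed schedule: every day at 03:00; the script itself decides full vs. incremental.
0 3 * * * /home/valentin/script/backup_canalblog.sh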

web_scrap.py (modified, 158 additions, 28 deletions)

@@ -12,11 +12,88 @@ def mkdirPath(path_dir, logger):
         repath = "/".join(makedir)
         if not os.path.exists(repath):
             logger.debug("Dossier crée : {0}".format(repath))
-            os.mkdir(repath)
+            try:
+                if len(repath) > 0:
+                    os.mkdir(repath)
+            except Exception as err:
+                logger.error("Directory error : {0}".format(err))
+                logger.debug("Directory error : {0} {1} {2} {3} {4}".format(err, path_dir, repath, pathh, makedir))
+                exit(1)
+
+
+def getScriptCss(url, js, css, logger):
+    try:
+        page = requests.get(url)
+    except Exception as err:
+        logger.error("Connection error : {0}".format(err))
+        exit(1)
+    page_url = []
+    if page.status_code == 200:
+        soup = BeautifulSoup(page.text, 'html.parser')
+        if js is True:
+            script = soup.find_all("script")
+            for anchor in script:
+                src = anchor.get("src", "/")
+                if src != "/":
+                    try:
+                        u = urlparse(url)
+                        o = urlparse(src)
+                    except Exception as err:
+                        logger.error("parsing error : {0}".format(err))
+                        exit(1)
+                    if o.netloc == "":
+                        o = o._replace(netloc=u.netloc)
+                        o = o._replace(scheme=u.scheme)
+                    page_url.append(o.geturl())
+        if css is True:
+            link = soup.find_all("link")
+            for anchor in link:
+                rel = anchor.get("rel")
+                if rel[0] == "stylesheet":
+                    href = anchor.get("href", "/")
+                    if href != "/":
+                        try:
+                            u = urlparse(url)
+                            o = urlparse(href)
+                        except Exception as err:
+                            logger.error("parsing error : {0}".format(err))
+                            exit(1)
+                        if o.netloc == "":
+                            o = o._replace(netloc=u.netloc)
+                            o = o._replace(scheme=u.scheme)
+                        page_url.append(o.geturl())
+    return page_url
+
+
+def getImg(webpage, logger):
+    page_img = []
+    for i in webpage:
+        try:
+            page = requests.get(i)
+        except Exception as err:
+            logger.error("Connection error : {0}".format(err))
+            exit(1)
+        if page.status_code == 200:
+            soup = BeautifulSoup(page.text, 'html.parser')
+            img = soup.find_all("img")
+            logger.info("image from page: {0} : ".format(i))
+            for anchor in img:
+                src = anchor.get("src", "/")
+                if src != "/":
+                    if src not in page_img:
+                        logger.info("image: {0} : ".format(src))
+                        page_img.append(src)
+    return page_img
+
+
 def getUrlPage(url, logger):
-    page = requests.get(url)
+    try:
+        page = requests.get(url)
+    except Exception as err:
+        logger.error("Connection error : {0}".format(err))
+        exit(1)
     page_url = []
     if page.status_code == 200:
         soup = BeautifulSoup(page.text, 'html.parser')
@@ -28,7 +105,11 @@ def getUrlPage(url, logger):
     webpage = []
     for i in page_url:
-        page = requests.get(i)
+        try:
+            page = requests.get(i)
+        except Exception as err:
+            logger.error("Connection error : {0}".format(err))
+            exit(1)
         if page.status_code == 200:
             logger.info("page : {0}".format(i))
             if i not in webpage:
@@ -58,28 +139,44 @@ def getUrlPage(url, logger):
         for title in h2:
             href = title.find_all("a")[0].get("href", "/")
             if href not in webpage:
-                o = urlparse(href)
-                o = o._replace(scheme="https").geturl()
+                try:
+                    o = urlparse(href)
+                    o = o._replace(scheme="https").geturl()
+                except Exception as err:
+                    logger.error("parsing error : {0}".format(err))
+                    exit(1)
                 webpage.append(o)
     return webpage


-def downloadPageHTML(webpage, backup_dir, logger):
+def downloadPage(webpage, backup_dir, logger):
     for i in range(0, len(webpage)):
-        o = urlparse(webpage[i])
+        try:
+            o = urlparse(webpage[i])
+        except Exception as err:
+            logger.error("parsing error : {0}".format(err))
+            exit(1)
         path_web = o.path.split("/")
         filePageWeb = path_web[len(path_web)-1]
         path_web.pop(len(path_web)-1)
         dir_page_web = "/".join(path_web)
-        mkdirPath("{0}/{1}".format(backup_dir, dir_page_web), logger)
-        r = requests.get(webpage[i])
+        mkdirPath("{0}/{1}/{2}".format(backup_dir, o.netloc, dir_page_web), logger)
+        try:
+            r = requests.get(webpage[i])
+        except Exception as err:
+            logger.error("Connection error : {0}".format(err))
+            exit(1)
         if r.status_code == 200:
-            fileDownload = "{0}/index.html".format(backup_dir)
+            fileDownload = "{0}/{1}/index.html".format(backup_dir, o.netloc)
             if len(dir_page_web) > 0 and len(filePageWeb) > 0:
-                fileDownload = "{0}{1}/{2}".format(backup_dir, dir_page_web, filePageWeb)
-            logger.info("{0}/{1} : {2}".format(i, len(webpage), fileDownload))
-            open(fileDownload, "wb").write(r.content)
+                fileDownload = "{0}/{1}{2}/{3}".format(backup_dir, o.netloc, dir_page_web, filePageWeb)
+            logger.info("{0}/{1} : {2}".format(i+1, len(webpage), fileDownload))
+            try:
+                open(fileDownload, "wb").write(r.content)
+            except Exception as err:
+                logger.error("file error : {0}".format(err))
+                exit(1)


 if __name__ == '__main__':
@@ -89,23 +186,56 @@ if __name__ == '__main__':
                         default="backup",
                         help="backup file path")
     parser.add_argument("--debug", help="Verbosity", action="store_true")
+    parser.add_argument("--logfile", help="Log file", default="")
+    parser.add_argument("--no-css", help="No CSS", dest="css", action="store_true")
+    parser.add_argument("--no-js", help="No JS", dest="js", action="store_true")
+    parser.add_argument("--no-img", help="No img", dest="img", action="store_true")
+    parser.add_argument("--no-html", help="No HTML", dest="html", action="store_true")
+    parser.add_argument("--quiet", help="No console output", action="store_true")
     args = parser.parse_args()

     logger = logging.getLogger('web_scrap')
-    ch = logging.StreamHandler()
-    if args.debug is True:
-        logger.setLevel(logging.DEBUG)
-        ch.setLevel(logging.DEBUG)
-    else:
-        logger.setLevel(logging.INFO)
-        ch.setLevel(logging.INFO)
     formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
-    ch.setFormatter(formatter)
-    logger.addHandler(ch)
-
-    o = urlparse(args.url)
-    o = o._replace(scheme="https")
-    webpage = getUrlPage(o.geturl().replace(":///", "://"), logger)
-
-    downloadPageHTML(webpage, args.dir, logger)
+
+    if args.quiet is False:
+        ch = logging.StreamHandler()
+        if args.debug is True:
+            logger.setLevel(logging.DEBUG)
+            ch.setLevel(logging.DEBUG)
+        else:
+            logger.setLevel(logging.INFO)
+            ch.setLevel(logging.INFO)
+        ch.setFormatter(formatter)
+        logger.addHandler(ch)
+
+    if len(args.logfile) > 0:
+        fileHandler = logging.FileHandler(args.logfile)
+        if args.debug is True:
+            fileHandler.setLevel(logging.DEBUG)
+        else:
+            fileHandler.setLevel(logging.INFO)
+        fileHandler.setFormatter(formatter)
+        logger.addHandler(fileHandler)
+
+    try:
+        o = urlparse(args.url)
+        o = o._replace(scheme="https")
+        url = o.geturl().replace(":///", "://")
+    except Exception as err:
+        logger.error("parsing error : {0}".format(err))
+
+    if args.js is False:
+        script = getScriptCss(url, True, False, logger)
+        downloadPage(script, "{0}/{1}/{2}".format(args.dir, o.path, "dists/js"), logger)
+
+    if args.css is False:
+        css = getScriptCss(url, False, True, logger)
+        downloadPage(css, "{0}/{1}/{2}".format(args.dir, o.path, "dists/css"), logger)
+
+    if args.html is False or args.img is False:
+        webpage = getUrlPage(url, logger)
+        if args.html is False:
+            downloadPage(webpage, args.dir, logger)
+
+        if args.img is False:
+            page_src = getImg(webpage, logger)
+            downloadPage(page_src, "{0}/{1}/{2}".format(args.dir, o.path, "img"), logger)
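
With the options added in this diff, the scraper can be driven from the command line; a usage sketch (the flags are the ones defined above, the URL is the one hard-coded in backup_canalblog.sh). Note that the --no-* flags are opt-out: by default everything is downloaded.

# Quiet run with a log file, as the backup script invokes it:
python3 web_scrap.py --url www.clarissariviere.com --dir backup --quiet --logfile web_scrap.txt

# HTML pages only, skipping JS, CSS and images:
python3 web_scrap.py --url www.clarissariviere.com --dir backup --no-js --no-css --no-img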