234 Commits
1.0.0 ... 4.0.4

Author SHA1 Message Date
f8e8239f40 Merge pull request 'fix-featured-image' (#27) from fix-featured-image into master
Reviewed-on: #27
2023-09-18 21:03:17 +00:00
19f96abb2d add no-menu and fix method linkFeaturedMedia 2023-09-18 22:58:36 +02:00
a936857eb9 add method private link featured media 2023-09-18 22:44:52 +02:00
9ab484da8d Merge pull request 'fix-export' (#26) from fix-export into master
Reviewed-on: #26
2023-09-15 21:43:35 +00:00
c275f10fb0 test ok for featured media 2023-09-15 23:38:35 +02:00
fa1854052b fix if 2023-09-15 23:14:24 +02:00
0e5afe60a7 add image featured media 2023-09-15 23:13:08 +02:00
d2cfb949f8 add image for featured image 2023-09-15 23:11:25 +02:00
bce1643135 fix replace 2023-09-14 21:27:09 +02:00
0c94783852 fix album author 2023-09-14 21:23:27 +02:00
6f78a48e57 fix index 2023-09-11 23:20:48 +02:00
dbab60c7dc fix import album 2023-09-11 23:20:22 +02:00
41e192f903 fix bug import 2023-09-11 22:40:41 +02:00
dce2c2dfa5 Merge pull request 'fix webpage' (#25) from fix-export into master
Reviewed-on: #25
2023-09-05 20:10:07 +00:00
eaec1ba9d4 fix webpage 2023-09-05 22:06:14 +02:00
3059f785c2 Merge pull request 'album-plus' (#24) from album-plus into master
Reviewed-on: #24
2023-09-01 22:27:57 +00:00
279a9f2786 fix parameter author 2023-09-02 00:27:18 +02:00
963f83ae81 fix author 2023-09-02 00:26:50 +02:00
7b154e3a1d add author 2023-08-31 22:50:31 +02:00
e5109204aa get link with album 2023-08-30 23:45:16 +02:00
2279e4b0b6 search title album 50% 2023-08-30 22:39:59 +02:00
2e21040196 add private method get info album 2023-08-29 22:26:15 +02:00
b4d0fe8aa0 Merge pull request 'album' (#23) from album into master
Reviewed-on: #23
2023-08-25 21:47:47 +00:00
6401692d0d finish menu with album import 2023-08-25 23:46:43 +02:00
1fc9c48d2c fix add menu with album 2023-08-24 22:43:15 +02:00
d9c20cedcb add menu 2023-08-24 00:01:46 +02:00
9d41e57379 add method getAuthor 2023-08-23 23:35:35 +02:00
d88ae7ed44 add author 2023-08-23 22:48:38 +02:00
50bf31d334 HTTP Post for album 2023-08-22 00:21:14 +02:00
9b58b45ae8 replace string 2023-08-20 21:41:16 +02:00
418bea3778 add album test wip 2023-08-17 00:18:45 +02:00
5959ab5b2e add gallery wip 2023-08-12 00:06:09 +02:00
e4eb1b6b68 add image to media wordpress 2023-08-05 23:22:44 +02:00
9ed5ffe399 add variable 2023-08-05 12:28:33 +02:00
28b513e1b2 add private method album 2023-08-05 12:03:17 +02:00
02f0c20bd0 Merge pull request 'menu-api' (#22) from menu-api into master
Reviewed-on: #22
2023-08-04 21:22:38 +00:00
1655217050 add album article 2023-08-04 23:21:45 +02:00
49c1552062 add wpmenu 2023-07-31 23:57:35 +02:00
7f800c8f7b add menu 100% work 2023-07-31 23:39:40 +02:00
5399b12133 menu wordpress 75% 2023-07-27 00:39:15 +02:00
b7493206a2 add private method addItemMenu 2023-07-26 19:22:43 +02:00
8de7485775 return value menu 2023-07-26 19:18:18 +02:00
eee14e08f1 add update menu item 2023-07-24 23:51:41 +02:00
cf2c1aa617 add item menu 2023-07-24 23:32:20 +02:00
e17cace820 remove navigation 2023-07-24 22:51:40 +02:00
941776a7c1 Create menu 2023-07-24 22:43:52 +02:00
72fbe0a364 add menu WIP debug 2023-07-22 20:23:26 +02:00
1f8ea70b40 define content for navigation 2023-07-20 00:04:11 +02:00
c6894648d1 add link 2023-07-18 22:09:20 +02:00
d5ddf1601b add post fine 2023-07-17 23:20:47 +02:00
aa8ac9cfcb first test id from post 2023-07-15 14:00:23 +02:00
82f9acd658 get title from post 2023-07-15 13:55:01 +02:00
7593b68b6c getting post title 2023-07-15 11:51:15 +02:00
5fe4b1f786 add href 2023-07-13 23:53:54 +02:00
0445054dc8 fix get id menu 2023-07-13 22:35:38 +02:00
be9ad9b934 swap loop 2023-07-13 00:25:14 +02:00
e6328135da get id 2023-07-13 00:09:18 +02:00
dded126926 change return getId 2023-07-11 21:42:50 +02:00
bca529f3c3 parent and children array 2023-07-11 21:39:41 +02:00
5059a15826 menu and submenu 2023-07-11 00:17:24 +02:00
20c4adb3cf menu and sub-menu wip 2023-07-09 21:42:09 +02:00
74fa87ea73 list menu wip 2023-07-09 19:11:05 +02:00
03f833a2c3 Menu and sub-menu 2023-07-09 15:27:20 +02:00
9acb620f93 add condition if 2023-07-09 12:29:13 +02:00
c6ccf98b1b working menu 2023-07-06 00:42:47 +02:00
95f5203727 list menu with id 2023-07-04 00:26:27 +02:00
9bbf769b40 test WIP 2023-07-01 22:16:35 +02:00
f0b4ba5e27 add url and file 2023-07-01 21:56:10 +02:00
47f504beb5 list tag 2023-07-01 00:11:43 +02:00
4b6b06aade add public method from URL 2023-06-30 23:52:56 +02:00
d10867a983 add class WPMenu 2023-06-30 23:28:54 +02:00
6fba5f009a Merge pull request 'directory-file' (#21) from directory-file into master
Reviewed-on: #21
2023-06-28 21:30:20 +00:00
699cecad4f change with tmp files 2023-06-28 23:28:24 +02:00
9f87f38347 fix file tmp for directory 2023-06-28 23:03:27 +02:00
55d62cebfb separate files method 2023-06-27 14:48:48 +02:00
193b0e6ef7 add tmp files wip 2023-06-27 14:37:45 +02:00
b88917127d Merge pull request 'webpage-file' (#20) from webpage-file into master
Reviewed-on: #20
2023-06-26 22:28:26 +00:00
781d8959c4 fix tmp directory parameter 2023-06-27 00:25:23 +02:00
a67ff868f3 fix json read file 2023-06-26 23:52:03 +02:00
8e0abc40bd check files tmp 2023-06-26 23:09:54 +02:00
9149a6c5cb rollback webpage 2023-06-26 22:44:42 +02:00
d1b6e8048a add revert files json 2023-06-25 21:16:05 +02:00
0eab1d885b add open file tmp 2023-06-20 21:38:39 +02:00
35ff22d463 change parameter for webpage 2023-06-20 00:17:38 +02:00
7dace5bdb7 add file tmp 2023-06-19 23:58:59 +02:00
703cc8922a Merge pull request 'diff-img' (#19) from diff-img into master
Reviewed-on: #19
2023-06-16 22:08:50 +00:00
ff3ee301fb diff img path done 2023-06-15 00:10:44 +02:00
04da5bc5f6 diff path network 2023-06-13 22:00:51 +02:00
f01a69a1e7 Merge pull request 'wpchange' (#18) from wpchange into master
Reviewed-on: #18
2023-06-12 22:48:57 +00:00
da4db0277a add img a change 2023-06-13 00:46:18 +02:00
7228911e68 add js and css 2023-06-13 00:38:34 +02:00
9e7e1b27fd change WIP test 2023-06-11 20:24:22 +02:00
16368c13bb add WPChange 2023-06-10 01:58:08 +02:00
c631909cb6 WPchange wip 2023-06-06 00:22:16 +02:00
3e76892676 add wpchange 2023-06-05 23:46:57 +02:00
3e75f05340 Merge pull request 'add-parameter' (#17) from add-parameter into master
Reviewed-on: #17
2023-06-05 20:58:51 +00:00
e48b262d7e add parameter no-image 2023-06-03 09:07:33 +02:00
2f1c081823 add parameter 2023-06-01 15:28:48 +02:00
4bd6f5c038 Merge pull request 'add wait' (#16) from wait_remove into master
Reviewed-on: #16
2023-05-29 21:36:38 +00:00
d3a03e1cb3 add wait 2023-05-29 23:36:11 +02:00
f507efce60 Merge pull request 'replace-exception' (#15) from replace-exception into master
Reviewed-on: #15
2023-05-29 21:29:18 +00:00
75c9fa0ad3 fix if 2023-05-28 22:42:38 +02:00
110ccc4bb1 replace exception for wpexport 2023-05-28 22:42:04 +02:00
269a9e9ccd add replace exception import 2023-05-28 22:31:46 +02:00
4c0ec09d91 move exception 2023-05-28 22:07:43 +02:00
42cfb30583 Merge pull request 'remove-thread' (#14) from remove-thread into master
Reviewed-on: #14
2023-05-26 22:18:19 +00:00
c76b20e64a add remove multithread 2023-05-27 00:16:41 +02:00
aff69bfcbc add multithread for remove 2023-05-27 00:06:11 +02:00
fd426f150d add variable 2023-05-26 17:50:57 +02:00
e21721cac1 move exception 2023-05-26 17:44:28 +02:00
69504687ef add count 2023-05-26 16:38:19 +02:00
fb59746fc0 Merge pull request 'https' (#13) from https into master
Reviewed-on: #13
2023-05-26 09:24:21 +00:00
5916cbff00 fix parameter 2023-05-26 10:04:36 +02:00
cd2fbd5372 add protocol https/http 2023-05-25 00:31:34 +02:00
f3b04f9459 update script backup 2023-05-24 23:34:03 +02:00
a400375e01 remove slugify import 2023-05-24 23:30:23 +02:00
351cb10f01 Merge pull request 'fix-media' (#12) from fix-media into master
Reviewed-on: #12
2023-05-23 14:47:07 +00:00
5c5dc707f5 fix headers search author 2023-05-23 16:46:07 +02:00
f69298179a reduce line code and add private method 2023-05-23 13:45:59 +02:00
d3ec7d147d loop replace 2023-05-23 11:22:37 +02:00
0fc6e78a18 fix title rendered 2023-05-23 00:02:51 +02:00
3718b807ba more message debug 2023-05-21 21:14:36 +02:00
75772ba7f0 remove duplicate 2023-05-21 21:12:00 +02:00
769b7f43fc fix add or update post 2023-05-18 00:24:41 +02:00
ba42d56be1 fix webpage 2023-05-16 00:15:16 +02:00
d18f4e1579 Add clean 2023-05-15 23:51:45 +02:00
8bdaea3910 add remove command 2023-05-15 23:42:18 +02:00
f3cb5c4069 fix parameters 2023-05-15 23:22:41 +02:00
cfb24bed0e add remove parameters 2023-05-15 23:21:25 +02:00
ee8674fd59 add remove class 2023-05-15 23:13:55 +02:00
ece4d78dd8 add remove all 2023-05-14 18:35:36 +02:00
3d7aa19441 add update 2023-05-12 00:16:58 +02:00
3c2f1cc017 separate publication and principal 2023-05-07 17:38:44 +02:00
f9be6770e3 separate article and page 2023-05-07 09:26:48 +02:00
21d2f35e6e add password parameter and fix post to delete 75% 2023-05-04 00:47:06 +02:00
4789fe80aa fix import 50% 2023-05-02 16:59:31 +02:00
3161a06459 Merge pull request 'thread' (#9) from thread into master
Reviewed-on: #9
2023-05-01 20:05:02 +00:00
1f6bd96a8e add del 2023-05-01 21:58:47 +02:00
b359521001 fix from directory import 2023-05-01 21:44:33 +02:00
73c0998ae0 fix thread fromDirectory and fromUrl 2023-05-01 21:18:57 +02:00
939e744d1d remove draft file 2023-05-01 15:45:59 +02:00
0029898e6e add debug message + fix error directory list 2023-05-01 15:45:34 +02:00
ab3720fbbc fix directory in thread 2023-04-29 22:26:47 +02:00
7a1286c4e2 add thread for directory import 2023-04-28 23:37:13 +02:00
5a4bdbb420 add name thread in message logger 2023-04-28 23:14:57 +02:00
bf4c2480f8 import threading for directory WIP 2023-04-27 00:00:53 +02:00
a0b816fe18 add debug thread 2023-04-26 23:03:43 +02:00
08ff16527d fix thread in parallelism 2023-04-25 16:15:45 +02:00
0acd5067cb thread 50% 2023-04-25 00:34:25 +02:00
aaac2385a3 fix previous commit 2023-04-24 23:16:53 +02:00
88f258ffba Add parallelism 2023-04-24 23:15:29 +02:00
a39e2200bd add function 2023-04-22 00:07:54 +02:00
5a5658d955 Merge pull request 'parent-comment' (#8) from parent-comment into master
Reviewed-on: #8
2023-04-20 19:30:45 +00:00
4e6ae92217 add message error and debug for export 2023-04-20 20:53:50 +02:00
34d6cc39d2 add debug message for error request 2023-04-20 20:48:37 +02:00
c44ffc5a86 double comment 2023-04-20 00:08:56 +02:00
ca39826a11 fix comment parent 75% 2023-04-19 23:53:11 +02:00
f8d103ff61 fix add comment 2023-04-19 23:16:39 +02:00
1c252c9a14 replace post by delete 2023-04-19 22:21:15 +02:00
84cc204007 comment update/add in fixing 2023-04-18 22:01:44 +02:00
edb9442b1c add search tags and categories before create tags and categories 2023-04-18 21:50:36 +02:00
d64aed6240 update error message + add debug 2023-04-18 00:00:32 +02:00
a5e7cb89f7 add error status code 2023-04-17 23:44:09 +02:00
ae7cb1e4e0 remove exit useless 2023-04-16 21:26:48 +02:00
4cf301b216 parent comment 90% 2023-04-16 21:25:32 +02:00
581b6941a6 parent id 75% 2023-04-16 21:06:04 +02:00
bd8ac241c1 debug level comment 2023-04-16 19:32:00 +02:00
0e15e88f31 Get level comment 50% 2023-04-16 19:16:23 +02:00
b54785c455 add parent comment WIP 2023-04-14 23:10:07 +02:00
1600a17383 Merge pull request 'retries' (#7) from retries into master
Reviewed-on: #7
2023-04-13 20:16:35 +00:00
74e7f1d74b add try/except for request 2023-04-13 22:14:30 +02:00
225c7ecabb add backoff factor 2023-04-13 21:59:12 +02:00
1311ef2ff2 add retry 2023-04-13 21:54:35 +02:00
f5e82fe4c4 Merge pull request 'insert' (#6) from insert into master
Reviewed-on: #6
2023-04-11 21:29:53 +00:00
76d2771886 remove newline useless 2023-04-11 23:27:41 +02:00
335266e1ad update comment 2023-04-11 23:26:40 +02:00
a856311f04 add method for comment 2023-04-11 22:30:00 +02:00
7848968fa1 Organise classes in a folder 2023-04-11 22:15:36 +02:00
05a3a28c6f add serial for url 2023-04-10 16:36:49 +02:00
7c75116c5b add url list 2023-04-10 16:15:13 +02:00
48e77084e8 remove newline 2023-04-10 16:07:14 +02:00
aa5c8893ec loop for url 2023-04-10 16:02:40 +02:00
4ddc4a7cd3 rm web_scrap + add set url + add backup1 to gitignore 2023-04-10 15:41:14 +02:00
ed78f22f2e fix WPImport from URL 2023-04-10 11:05:32 +02:00
e74dfc2b73 add import from url 2023-04-10 00:00:01 +02:00
cd50e45493 fix WPExport 2023-04-09 23:49:10 +02:00
19c62f38d4 add exit 2023-04-09 22:50:41 +02:00
bba6cd1ca7 add export canalblog 2023-04-09 22:49:44 +02:00
9ed08ea964 add parameter parser 2023-04-09 21:45:51 +02:00
cd6b03b0ff add parameter parser 2023-04-09 21:17:49 +02:00
7e484fa308 add args command name 2023-04-08 23:43:06 +02:00
ebc6206ec9 add subparser export 2023-04-08 23:34:56 +02:00
b3f623cbd5 subparser import 2023-04-08 23:20:52 +02:00
8384dcb2b6 create class WPExport 2023-04-08 22:14:20 +02:00
2289066dd5 rename main file 2023-04-08 21:44:52 +02:00
481fc40929 separate class file for WPimport 2023-04-08 21:27:35 +02:00
6f7504e669 separate file class 2023-04-08 12:27:30 +02:00
d58ead52b2 replace print by logger 2023-04-08 12:17:43 +02:00
9ab33c169e add directory parameter 2023-04-07 22:55:27 +02:00
34115a3a7d recursive functions for directories 2023-04-07 22:38:34 +02:00
1f7e442d04 wip directory 2023-04-06 21:53:56 +02:00
5768b37cd1 add check for file existence 2023-04-06 20:59:11 +02:00
ba511bc6c4 add print for added image 2023-04-04 22:14:10 +02:00
665f1474f2 delete and replace image 2023-04-04 22:07:36 +02:00
404ad5dd6c update image not fix 2023-04-04 00:00:28 +02:00
501876dac2 add or update featured media 2023-04-03 23:45:48 +02:00
c9b1264153 remove private method for featured media 2023-04-02 18:01:57 +02:00
f77274f00e add headers json 2023-04-02 17:56:22 +02:00
1e162662e6 add featured media 2023-04-02 17:51:54 +02:00
ec4135c5d0 fix condition type file 2023-04-02 17:36:17 +02:00
cb64dd47ab create private method for add or update media 2023-04-02 17:34:55 +02:00
42b7e7e408 get featured image for post wip 2023-04-02 16:56:07 +02:00
cc33ab34df private variable 2023-04-02 13:14:52 +02:00
f07f8c040f add private method for association id 2023-04-02 13:06:10 +02:00
4054f41e9b add json dumps for post 2023-04-01 18:32:17 +02:00
faa22f1438 update post 2023-04-01 00:18:56 +02:00
bcb3abce01 fix variable 2023-04-01 00:11:33 +02:00
e0b4895b62 association image article 2023-03-31 00:14:38 +02:00
066d8cae52 remove print 2023-03-31 00:06:47 +02:00
90881eb037 add media in body html 2023-03-31 00:05:11 +02:00
c92f24e6af check image exist in media 2023-03-30 23:50:25 +02:00
301f1e2d4b add img successful in media 2023-03-30 23:29:29 +02:00
e1b0c0cba8 img wip 2023-03-29 22:59:15 +02:00
f250637912 add class + wordpress 2023-03-29 22:31:35 +02:00
19229bc65b add .gitgnore + add function 2023-03-28 22:29:55 +02:00
d96d38e508 add author name 2023-03-28 16:43:56 +02:00
dc0fd0c781 insert comment 75% 2023-03-28 16:40:15 +02:00
e3b9e92c23 fix add author 2023-03-28 15:37:48 +02:00
82ce3d1a2b add author for article 2023-03-28 15:28:34 +02:00
605bd06e51 fix space 2023-03-28 12:07:11 +02:00
491f15ae3c first successful article insertion 2023-03-28 11:31:25 +02:00
0c41dc3e65 distinct tags and categories 2023-03-27 23:51:51 +02:00
3622e37942 add tags 2023-03-23 23:49:42 +01:00
eae95d5671 add script insert wip 2023-03-23 23:28:57 +01:00
10 changed files with 2400 additions and 245 deletions

.gitignore vendored, new file (+5 lines)

@@ -0,0 +1,5 @@
backup*/
wp-navigation
*.log
__pycache__/
wp-gallery


@@ -3,8 +3,8 @@
TAR=/usr/bin/tar
PYTHON=/usr/bin/python3
GZIP=/usr/bin/gzip
-SCRIPTDIR=/home/valentin/script
-WEBSCRAP=${SCRIPTDIR}/web_scrap.py
+SCRIPTDIR=/home/valentin/script/webscrap
+WEBSCRAP=${SCRIPTDIR}/import_export_canalblog.py
URL=www.clarissariviere.com
DATE=$(date +%Y%m%d)
DIRECTORY=/home/valentin/backup
@@ -24,8 +24,8 @@ else
fi
subject="${subject} ${URL} ${DATE}"
echo > ${BACKUPDIR}/${LOGFILE}
-${PYTHON} ${WEBSCRAP} --url ${URL} --dir ${DIRECTORY} --quiet --logfile ${BACKUPDIR}/${LOGFILE}
+${PYTHON} ${WEBSCRAP} --quiet --logfile ${BACKUPDIR}/${LOGFILE} --parallel 20 export --url ${URL} --directory ${DIRECTORY}
if [ ${?} -ne 0 ]; then
subject="${subject} echoue : recuperation page"
echo ${subject} | mail -s "${subject}" -A ${BACKUPDIR}/${LOGFILE} ${SENDER}
exit 1

import_export_canalblog.py, new file (+374 lines)

@@ -0,0 +1,374 @@
#!/usr/bin/python3
from requests.auth import HTTPBasicAuth
from getpass import getpass
from urllib.parse import urlparse
from concurrent import futures
from concurrent.futures import as_completed, wait, ALL_COMPLETED
import argparse, logging, threading, os, glob
from lib.WPImport import WPimport
from lib.WPExport import WPExport
from lib.WPRemove import WPRemove
from lib.WPChange import WPChange
from lib.WPMenu import WPMenu
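# Entry point of the canalblog-to-WordPress migration tool: the import, export,
# remove, change and menu subcommands defined below share the global --debug,
# --logfile, --quiet, --parser, --parallel, --no-ssl, --revert and --tmp options.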
def errorRevert(logger, revert, tmp, parallel):
if revert is True:
files_tmp = glob.glob("{0}/*.json".format(tmp))
if len(files_tmp) == 0:
logger.error("Error revert, because files not found")
exit(1)
if len(files_tmp) != int(parallel):
for file_r in files_tmp:
os.remove(file_r)
logger.error("Error revert, because number files tmp is incompatible with parallel number")
exit(1)
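# Thread worker for the change subcommand: rewrites asset paths in this thread's slice of the saved files.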
def change(index, number, args, logger, tmp, revert):
changeWp = WPChange(logger=logger, index_name=index, number_thread=number, tmp=tmp)
changeWp.fromDirectory(args.directory, revert)
del changeWp
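# Thread worker for removals: walks the comma-separated WordPress URLs and cleans posts, tags, categories and media according to the flags.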
def remove(index, number, args, basic, logger, ssl_wordpress):
removeWp = WPRemove(basic=basic, wordpress="", logger=logger, ssl_wordpress=ssl_wordpress, index_name=index, number_thread=number)
if args.remove == True:
for i in args.wordpress.split(","):
removeWp.setUrl(i)
removeWp.cleanPosts()
removeWp.cleanTags()
removeWp.cleanCategories()
removeWp.cleanMedia()
else:
for i in args.wordpress.split(","):
removeWp.setUrl(i)
if args.posts == True:
removeWp.cleanPosts()
if args.categories == True:
removeWp.cleanCategories()
if args.tags == True:
removeWp.cleanTags()
if args.media == True:
removeWp.cleanMedia()
del removeWp
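# Thread worker for the export subcommand: collects this thread's slice of page URLs (unless reverting), then downloads the HTML pages and/or images.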
def download(name_thread, max_thread, url, logger, parser, directory, html, img, ssl_canalblog, revert, tmp):
exportWp = WPExport(name="Thread-{0}".format(int(name_thread) + 1), url=url, logger=logger, parser=parser, directory=directory, ssl_canalblog=ssl_canalblog)
if revert is False:
exportWp.getUrlPage(name_thread, max_thread)
for i in ["article", "page"]:
for j in ["publications", "principal"]:
if html is False:
exportWp.downloadHTML(j, i)
if img is False:
exportWp.downloadImg(j, i)
del exportWp
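# Thread worker for import --canalblog: collects the URL map (unless reverting) and imports articles and pages into each WordPress; with --serial, canalblog[i] maps to wordpress[i].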
def importUrl(name_thread, max_thread, canalblog, logger, parser, wordpress, basic, serial, ssl_wordpress, ssl_canalblog, create, update, image, revert, tmp, author):
canalblog = canalblog.split(",")
wordpress = wordpress.split(",")
name = "Thread-{0}".format(int(name_thread) + 1)
protocol = "https"
if ssl_canalblog is False:
protocol = "http"
if serial is False:
for canal in canalblog:
try:
o = urlparse(canal)
o = o._replace(scheme=protocol)
url = o.geturl().replace(":///", "://")
except Exception as err:
logger.error("{0} : parsing error : {1}".format(name, err))
exit(1)
exportWp = WPExport(name="Thread-{0}".format(int(name_thread) + 1), url=url, logger=logger, parser=parser, ssl_canalblog=ssl_canalblog, tmp=tmp)
if not revert:
exportWp.getUrlPage(name_thread, max_thread)
del exportWp
for j in wordpress:
importWp = WPimport(name=name, basic=basic, wordpress=j, logger=logger, parser=parser, ssl_wordpress=ssl_wordpress, no_create=create, no_update=update, no_image=image, tmp=tmp, author=author)
for k in ["article", "page"]:
for l in ["publications", "principal"]:
importWp.fromUrl(l, k)
del importWp
else:
if len(canalblog) != len(wordpress):
logger.error("{0} : ERREUR : Le nombre de dossier n'est pas equivalent au nombre d'URL wordpress".format(name))
exit(1)
for i in range(0, len(canalblog)):
try:
o = urlparse(canalblog[i])
o = o._replace(scheme=protocol)
url = o.geturl().replace(":///", "://")
except Exception as err:
logger.error("parsing error : {0}".format(err))
exit(1)
exportWp = WPExport(name=name, url=url, logger=logger, parser=parser, ssl_canalblog=ssl_canalblog)
if not revert:
exportWp.getUrlPage(name_thread, max_thread)
del exportWp
importWp = WPimport(name=name, basic=basic, wordpress=wordpress[i], logger=logger, parser=parser, ssl_wordpress=ssl_wordpress, no_create=create, no_update=update, no_image=image, tmp=tmp, author=author)
for k in ["article", "page"]:
for l in ["publications", "principal"]:
importWp.fromUrl(l, k)
del importWp
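# Thread worker for import --directory: imports a local backup; with --serial, directory[i] maps to wordpress[i].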
def importDirectory(name_thread, max_thread, directory, logger, parser, wordpress, basic, serial, ssl_wordpress, create, update, image, revert, author):
name = "Thread-{0}".format(int(name_thread) + 1)
directory = directory.split(",")
wordpress = wordpress.split(",")
if serial is False:
for i in wordpress:
importWp = WPimport(name=name, basic=basic, wordpress=i, logger=logger, parser=parser, ssl_wordpress=ssl_wordpress, no_create=create, no_update=update, no_image=image, author=author)
for j in directory:
importWp.fromDirectory(j, name_thread, max_thread, revert)
del importWp
else:
if len(directory) != len(wordpress):
logger.error("{0} : Error : Number directory is different than wordpress".format(name))
exit(1)
for i in range(0, len(wordpress)):
importWp = WPimport(name=name, basic=basic, wordpress=wordpress[i], logger=logger, parser=parser, ssl_wordpress=ssl_wordpress, no_create=create, no_update=update, no_image=image, author=author)
importWp.fromDirectory(directory[i], name_thread, max_thread, revert)
del importWp
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--debug", help="Verbosity", action="store_true")
parser.add_argument("--logfile", help="Log file", default="")
parser.add_argument("--quiet", help="No console output", action="store_true")
parser.add_argument("--parser", help="Parser content", default="html.parser")
parser.add_argument("--parallel", help="Define number thread (default : 1)", default=1)
parser.add_argument("--no-ssl", help="No ssl for canalblog and/or wordpress (example wordpress,canalblog)", dest="ssl", default="")
parser.add_argument("--revert", help="Restart a work from stopping work", action="store_true")
parser.add_argument("--tmp", help="directory tmp", default="/tmp/import_export_canablog")
subparsers = parser.add_subparsers(dest="command")
import_parser = subparsers.add_parser("import")
import_parser.add_argument("--user", help="wordpress user", required=True)
import_parser.add_argument("--password", help="password wordpress's user", default="")
import_parser.add_argument("--file", help="HTML file", default="")
import_parser.add_argument("--directory", help="HTML directory", default="")
import_parser.add_argument("--canalblog", help="URL Canalblog", default="")
import_parser.add_argument("--wordpress", help="URL Wordpress", required=True)
import_parser.add_argument("--serial", help="Serial execution", action="store_true")
import_parser.add_argument("--remove-all", dest="remove", help="Remove all", action="store_true")
import_parser.add_argument("--remove-posts", help="Remove all posts", dest="posts", action="store_true")
import_parser.add_argument("--remove-categories", help="Remove all categories", dest="categories", action="store_true")
import_parser.add_argument("--remove-tags", help="Remove all tags", dest="tags", action="store_true")
import_parser.add_argument("--remove-media", help="Remove all media", dest="media", action="store_true")
import_parser.add_argument("--no-create", help="No create post", dest="create", default="store_false", action="store_true")
import_parser.add_argument("--no-update", help="No update post", dest="update", default="store_false", action="store_true")
import_parser.add_argument("--no-image", help="No image add or update", dest="image", default="store_false", action="store_true")
import_parser.add_argument("--no-menu", help="No menu add or update", dest="menu", default="store_false", action="store_true")
import_parser.add_argument("--author", dest="author", help="Define author", default="")
remove_parser = subparsers.add_parser("remove")
remove_parser.add_argument("--user", help="wordpress user", required=True)
remove_parser.add_argument("--password", help="password wordpress's user", default="")
remove_parser.add_argument("--wordpress", help="URL Wordpress", required=True)
remove_parser.add_argument("--all", dest="remove", help="Remove all (posts, media, tags, categories)", action="store_true")
remove_parser.add_argument("--posts", help="Remove all posts", action="store_true")
remove_parser.add_argument("--categories", help="Remove all categories", action="store_true")
remove_parser.add_argument("--tags", help="Remove all tags", action="store_true")
remove_parser.add_argument("--media", help="Remove all media", action="store_true")
export_parser = subparsers.add_parser("export")
export_parser.add_argument("--url", help="canblog URL to be scraping", required=True)
export_parser.add_argument("--directory",
default="backup",
help="backup file path")
export_parser.add_argument("--no-css", help="No CSS", dest="css", action="store_true")
export_parser.add_argument("--no-js", help="No JS", dest="js", action="store_true")
export_parser.add_argument("--no-img", help="No img", dest="img", action="store_true")
export_parser.add_argument("--no-html", help="No HTML", dest="html", action="store_true")
change_parser = subparsers.add_parser("change")
change_parser.add_argument("--directory",
default="",
help="Directory")
change_parser.add_argument("--file",
default="",
help="File")
menu_parser = subparsers.add_parser("menu")
menu_parser.add_argument("--user", help="wordpress user", required=True)
menu_parser.add_argument("--password", help="password wordpress's user", default="")
menu_parser.add_argument("--file", help="HTML file", default="")
menu_parser.add_argument("--canalblog", help="URL Canalblog", default="")
menu_parser.add_argument("--wordpress", help="URL Wordpress", required=True)
args = parser.parse_args()
logger = logging.getLogger('import export canalblog')
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ssl_canalblog = True
ssl_wordpress = True
for i in args.ssl.split(","):
if i == "canalblog":
ssl_canalblog = False
if i == "wordpress":
ssl_wordpress = False
if args.quiet is False:
ch = logging.StreamHandler()
if args.debug is True:
logger.setLevel(logging.DEBUG)
ch.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
ch.setLevel(logging.INFO)
ch.setFormatter(formatter)
logger.addHandler(ch)
if len(args.logfile) > 0:
fileHandler = logging.FileHandler(args.logfile)
if args.debug is True:
fileHandler.setLevel(logging.DEBUG)
else:
fileHandler.setLevel(logging.INFO)
fileHandler.setFormatter(formatter)
logger.addHandler(fileHandler)
os.makedirs(args.tmp, exist_ok=True)
if args.command == "import" or args.command == "remove" or args.command == "menu":
password = args.password
if len(args.password) == 0:
password = getpass()
if len(password) == 0:
logger.error("No password error !!! ")
exit(1)
basic = HTTPBasicAuth(args.user, password)
if args.command == "import":
wordpress = args.wordpress.split(",")
importWp = WPimport(basic=basic, wordpress="", logger=logger, parser=args.parser, ssl_wordpress=ssl_wordpress, author=args.author, ssl_canalblog=ssl_canalblog)
if len(args.file) > 0:
for i in wordpress:
importWp.setUrl(i)
importWp.fromFile(files=args.file.split(","))
if args.menu is False:
menuWp = WPMenu(name="Thread-1", basic=basic, wordpress=args.wordpress, logger=logger, parser=args.parser, ssl_canalblog=ssl_canalblog, ssl_wordpress=ssl_wordpress)
menuWp.fromFile("{0}".format(args.file.split(",")[0]))
if len(args.directory) > 0:
try:
with futures.ThreadPoolExecutor(max_workers=int(args.parallel)) as ex:
wait_for = [ ex.submit(remove, i, args.parallel, args, basic, logger, ssl_wordpress) for i in range(0, int(args.parallel)) ]
wait(wait_for, return_when=ALL_COMPLETED)
errorRevert(logger, args.revert, args.tmp, args.parallel)
wait_for = [
ex.submit(importDirectory, i, int(args.parallel), args.directory, logger, args.parser, args.wordpress, basic, args.serial, ssl_wordpress, args.create, args.update, args.image, args.revert, args.author)
for i in range(0, int(args.parallel))
]
if args.menu is False:
wait(wait_for, return_when=ALL_COMPLETED)
menuWp = WPMenu(name="Thread-1", basic=basic, wordpress=args.wordpress, logger=logger, parser=args.parser, ssl_canalblog=ssl_canalblog, ssl_wordpress=ssl_wordpress)
menuWp.fromFile("{0}/index.html".format(args.directory))
except Exception as err:
logger.error("Threading error : {0}".format(err))
if len(args.canalblog) > 0:
try:
with futures.ThreadPoolExecutor(max_workers=int(args.parallel)) as ex:
wait_for = [ ex.submit(remove, i, args.parallel, args, basic, logger, ssl_wordpress) for i in range(0, int(args.parallel)) ]
wait(wait_for, return_when=ALL_COMPLETED)
errorRevert(logger, args.revert, args.tmp, args.parallel)
wait_for = [
ex.submit(importUrl, i, int(args.parallel), args.canalblog, logger, args.parser, args.wordpress, basic, args.serial, ssl_wordpress, ssl_canalblog, args.create, args.update, args.image, args.revert, args.tmp, args.author)
for i in range(0, int(args.parallel))
]
if args.menu is False:
wait(wait_for, return_when=ALL_COMPLETED)
menuWp = WPMenu(name="Thread-1", basic=basic, wordpress=args.wordpress, logger=logger, parser=args.parser, ssl_canalblog=ssl_canalblog, ssl_wordpress=ssl_wordpress)
menuWp.fromUrl(args.canalblog)
except Exception as err:
logger.error("Threading error : {0}".format(err))
exit(0)
if args.command == "export":
canalblog = args.url.split(",")
protocol = "https"
if ssl_canalblog is False:
protocol = "http"
exportWp = WPExport(logger=logger, parser=args.parser, directory=args.directory, ssl_canalblog=ssl_canalblog)
for canal in canalblog:
try:
o = urlparse(canal)
o = o._replace(scheme=protocol)
url = o.geturl().replace(":///", "://")
except Exception as err:
logger.error("parsing error : {0}".format(err))
exit(1)
exportWp.setUrl(url)
if args.js is False:
exportWp.downloadJs()
if args.css is False:
exportWp.downloadCss()
del exportWp
if args.html is False or args.img is False:
try:
with futures.ThreadPoolExecutor(max_workers=int(args.parallel)) as ex:
wait_for = [
ex.submit(download, i, int(args.parallel), url, logger, args.parser, args.directory, args.html, args.img, ssl_canalblog, args.revert, args.tmp)
for i in range(0, int(args.parallel))
]
except Exception as err:
logger.error("Threading error : {0}".format(err))
exit(0)
if args.command == "remove":
try:
with futures.ThreadPoolExecutor(max_workers=int(args.parallel)) as ex:
wait_for = [ ex.submit(remove, i, args.parallel, args, basic, logger, ssl_wordpress) for i in range(0, int(args.parallel)) ]
except Exception as err:
logger.error("Thread error for remove : {0}".format(err))
exit(0)
if args.command == "change":
if len(args.directory) > 0:
try:
with futures.ThreadPoolExecutor(max_workers=int(args.parallel)) as ex:
errorRevert(logger, args.revert, args.tmp, args.parallel)
wait_for = [ ex.submit(change, i, args.parallel, args, logger, args.tmp, args.revert) for i in range(0, int(args.parallel)) ]
except Exception as err:
logger.error("Thread error for remove : {0}".format(err))
if len(args.file) > 0:
changeWp = WPChange(logger=logger)
for filei in args.file.split(","):
changeWp.fromFile(filei)
exit(0)
if args.command == "menu":
menuWp = WPMenu(name="Thread-1", basic=basic, wordpress=args.wordpress, logger=logger, parser=args.parser, ssl_canalblog=ssl_canalblog, ssl_wordpress=ssl_wordpress)
if len(args.file) > 0:
menuWp.fromFile(args.file)
if len(args.canalblog) > 0:
menuWp.fromUrl(args.canalblog)
exit(0)
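
A minimal usage sketch (the example URLs and the admin user are hypothetical; the flags come from the parsers above, and the password is prompted with getpass when --password is left empty):

python3 import_export_canalblog.py --parallel 4 export --url www.example-canalblog.com --directory backup
python3 import_export_canalblog.py --parallel 4 import --user admin --wordpress blog.example.com --directory backup/www.example-canalblog.com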

lib/WPChange.py, new file (+173 lines)

@@ -0,0 +1,173 @@
from bs4 import BeautifulSoup
from urllib.parse import urlparse
import requests, os, logging, re, json
class WPChange:
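# Rewrites src/href attributes in saved HTML pages so images, scripts and
# stylesheets point at the local /img, /dists/js and /dists/css copies.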
# Constructor
def __init__(self, index_name=1, number_thread=1, logger=None, parser="html.parser", tmp="/tmp/import_export_canablog"):
self._name = "Thread-{0}".format(index_name)
self._logger = logger
self._number_thread = number_thread
self._parser = parser
self._tmp = tmp
self._index_name = index_name
# Destructor
def __del__(self):
print("{0} : Import finished".format(self._name))
# Public method
## From directory
def fromDirectory(self, directory="", revert=False):
self._directory = directory
directory = "{0}/archives".format(directory)
directories = self._getDirectories([], "{0}".format(directory))
if len(directories) > 0:
files = self._getFiles(directories)
if revert is False:
self._tmpFiles(files=files, number_thread=self._index_name, max_thread=self._number_thread)
self._fromFileTmp()
else:
self._logger.error("{0} : No files for {1}".format(self._name, directory))
def fromFile(self, files=[]):
for i in range(0, len(files)):
if os.path.exists(files[i]):
self._logger.info("{0} : ({1}/{2}) File is being processed : {3}".format(self._name, i+1, len(files), files[i]))
self._change(files[i])
# Private method
def _fromFileTmp(self):
try:
with open("{0}/{1}.json".format(self._tmp, self._name)) as file:
files = json.loads(file.read())
self._logger.debug("{0} : size of webpage : {1}".format(self._name, len(files)))
for i in range(0, len(files)):
if os.path.exists(files[i]):
self._logger.info("{0} : ({1}/{2}) File is being processed : {3}".format(self._name, i+1, len(files), files[i]))
self._change(files[i])
except Exception as ex:
self._logger.error("{0} : Read file json from tmp : {1}".format(self._name, ex))
def _tmpFiles(self, files=[], number_thread=1, max_thread=1):
divFiles = int(len(files) / int(max_thread))
currentRangeFiles = int(divFiles * (int(number_thread)+1))
firstRange = int(currentRangeFiles - divFiles)
self._logger.debug("{0} : index : {1}".format(self._name,number_thread))
self._logger.debug("{0} : first range : {1}".format(self._name,firstRange))
self._logger.debug("{0} : last range : {1}".format(self._name,currentRangeFiles))
webpage = []
for i in range(firstRange, currentRangeFiles):
webpage.append(files[i])
try:
string_webpage = json.dumps(webpage)
open("{0}/{1}.json".format(self._tmp, self._name), "wt").write(string_webpage)
except Exception as ex:
self._logger.error("{0} : Error for writing webpage : {1}".format(self._name, ex))
## Get all files
def _getFiles(self, item):
files = []
for i in item:
for j in os.listdir(i):
if os.path.isfile("{0}/{1}".format(i, j)):
files.append("{0}/{1}".format(i, j))
return files
## Get directories
def _getDirectories(self, subdirectory, item):
sub = subdirectory
for i in os.listdir(item):
if os.path.isdir("{0}/{1}".format(item, i)):
sub.append("{0}/{1}".format(item, i))
subdirectory = self._getDirectories(sub, "{0}/{1}".format(item, i))
return subdirectory
## Change path img file
def _change(self, file):
ext_img = ["png", "svg", "gif", "jpg", "jpeg"]
try:
with open(file, 'r') as f:
content = f.read()
soup = BeautifulSoup(content, self._parser)
img = soup.find_all("img")
for i in img:
src = i.get("src")
o = urlparse(src)
if len(o.netloc) > 0:
self._logger.info("{0} : Change source image {1} /img/{2}/{3}".format(self._name, src, o.netloc, o.path))
content = content.replace(src, "/img/{0}/{1}".format(o.netloc, o.path))
script = soup.find_all("script", {"type": "text/javascript"})
for i in script:
src = i.get("src")
if src is not None:
o = urlparse(src)
if len(o.netloc) > 0:
self._logger.info("{0} : Change source js {1} /dists/js/{2}/{3}".format(self._name, src, o.netloc, o.path))
content = content.replace(src, "/dists/js/{0}/{1}".format(o.netloc, o.path))
link = soup.find_all("link", {"rel": "stylesheet"})
for i in link:
href = i.get("href")
if href is not None:
o = urlparse(href)
if len(o.netloc) > 0:
self._logger.info("{0} : Change source css {1} /dists/css/{2}/{3}".format(self._name, href, o.netloc, o.path))
content = content.replace(href, "/dists/css/{0}/{1}".format(o.netloc, o.path))
a = soup.find_all("a", {"target": "_blank"})
for i in a:
href = i.get("href")
if href is not None:
o = urlparse(href)
if len(o.netloc) > 0:
ext = o.path.split(".")[len(o.path.split("."))-1]
if ext in ext_img:
self._logger.info("{0} : Change a img {1} /img/{2}/{3}".format(self._name, href, o.netloc, o.path))
content = content.replace(href, "/img/{0}/{1}".format(o.netloc, o.path))
try:
with open(file, "w") as f:
self._logger.info("{0} : File write : {1}".format(self._name, file))
f.write(content)
except Exception as ex:
self._logger.error("{0} : Error for write file {1} : {2}".format(self._name, file, ex))
except Exception as ex:
self._logger.error("{0} : Error for read file {1} : {2}".format(self._name, file, ex))

lib/WPExport.py, new file (+326 lines)

@@ -0,0 +1,326 @@
#!/usr/bin/python3
from bs4 import BeautifulSoup
from urllib.parse import urlparse
import requests, os, argparse, logging, json
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
class WPExport:
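# Scrapes a canalblog site: getUrlPage collects page and article URLs into a
# per-thread JSON file under tmp; the download* methods fetch HTML, images, JS and CSS.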
def __init__(self, name = "Thread-0", url = "", logger = None, parser = "html.parser", directory = "backup", ssl_canalblog=True, tmp="/tmp/import_export_canablog"):
self._url = url
self._logger = logger
self._parser = parser
self._dir = directory
self._name = name
self._protocol = "https"
if ssl_canalblog is False:
self._protocol = "http"
self._request = requests.Session()
retries = Retry(total=10,
status_forcelist=[429, 500, 502, 503, 504], backoff_factor=2)
self._request.mount('{0}://'.format(self._protocol), HTTPAdapter(max_retries=retries))
self._tmp = tmp
# Destructor
def __del__(self):
self._logger.info("{0} : Export finished for {1}".format(self._name, self._url))
# Public method
# Set name
def setName(self, name):
self._name = "Thread-{0}".format(int(name) + 1)
# Set URL
def setUrl(self, url):
self._url = url
# Download JS
def downloadJs(self):
script = self._getScriptCss(True, False)
o = urlparse(self._url)
self._downloadPage(script, "{0}/{1}/{2}".format(self._dir, o.path, "dists/js"))
# Download CSS
def downloadCss(self):
css = self._getScriptCss(False, True)
o = urlparse(self._url)
self._downloadPage(css, "{0}/{1}/{2}".format(self._dir, o.path, "dists/css"))
# Download HTML
def downloadHTML(self, first, second):
try:
with open("{0}/{1}.json".format(self._tmp, self._name)) as file:
webpage = json.loads(file.read())
self._downloadPage(webpage[first][second], self._dir)
except Exception as ex:
self._logger.error("{0} : Read file json from tmp : {1}".format(self._name, ex))
# Download Image
def downloadImg(self, first, second):
try:
with open("{0}/{1}.json".format(self._tmp, self._name)) as file:
webpage = json.loads(file.read())
page_src = self._getImg(webpage[first][second])
o = urlparse(self._url)
self._downloadPage(page_src, "{0}/{1}/{2}".format(self._dir, o.path, "img"))
except Exception as ex:
self._logger.error("{0} : Read file json from tmp : {1}".format(self._name, ex))
# Get URL
def getUrlPage(self, index_thread, max_thread):
try:
page = self._request.get(self._url)
page_url = []
if page.status_code == 200:
soup = BeautifulSoup(page.text, self._parser)
ul = soup.find_all("ul", id="listsmooth")
for anchor in ul[0].find_all("a"):
href = anchor.get('href', '/')
if href != "#":
page_url.append(href)
else:
self._logger.error("{0} : URL did not get due status code : {1}".format(self._name, page.status_code))
self._logger.debug("{0} : {1}".format(self._name, page.content))
except ConnectionError as err:
self._logger.error("{0} : Connection error : {1}".format(self._name, err))
exit(1)
except Exception as err:
self._logger.error("{0} : Exception error : {1}".format(self._name, err))
webpage = {"principal": {"page":[], "article":[]}, "publications": {"page":[], "article":[]}}
for i in page_url:
section = "publications"
o = urlparse(i)
o = o._replace(scheme=self._protocol)
i = o.geturl().replace(":///", "://")
if i == "{0}/".format(self._url):
section = "principal"
try:
page = self._request.get(i)
if page.status_code == 200:
self._logger.info("{0} : page : {1}".format(self._name, i))
if i not in webpage[section]["page"]:
webpage[section]["page"].append(i)
soup = BeautifulSoup(page.text, self._parser)
class_div = soup.find_all("div", class_="pagingfirstline")
if len(class_div) > 0:
pagingfirstline = class_div[0].find_all("a")
if len(pagingfirstline) > 1:
lastpage = pagingfirstline[len(pagingfirstline)-1].get("href", "/")
self._logger.debug("{0} : Last page {1}".format(self._name, lastpage))
element_lastpage = lastpage.split("/")[len(lastpage.split("/"))-1]
number_page = element_lastpage.split("-")[0].split("p")[1]
number_lastpage = int(number_page) / 10
setPageDivided = int(number_lastpage) / max_thread
if setPageDivided > int(setPageDivided):
setPageDivided = setPageDivided + 1
setPagePart = setPageDivided * (index_thread + 1)
firstPagePart = (setPagePart - setPageDivided)
self._logger.debug("{0} : Total page : {1}".format(self._name,int(number_lastpage)))
self._logger.debug("{0} : First range : {1}".format(self._name, int(firstPagePart)))
self._logger.debug("{0} : Last range : {1}".format(self._name, int(setPagePart)))
for j in range(int(firstPagePart),int(setPagePart)+1):
paging = j * 10
categorie = urlparse(i).path.split("/")
url_paging = "{0}/archives/p{1}-10.html".format(self._url, paging)
if len(categorie) > 2:
url_paging = "{0}/archives/{1}/p{2}-10.html".format(self._url, categorie[2], paging)
self._logger.info("{0} : {1}".format(self._name, url_paging))
if url_paging not in webpage[section]["page"]:
webpage[section]["page"].append(url_paging)
page = self._request.get(url_paging)
if page.status_code == 200:
soup = BeautifulSoup(page.text, self._parser)
h2 = soup.find_all("h2")
self._logger.debug("{0} : {1} H2 : {2}".format(self._name, url_paging, h2))
for title in h2:
self._logger.debug("{0} : {1} a : {2}".format(self._name, url_paging, title.find_all("a")))
href = title.find_all("a")[0].get("href", "/")
if href not in webpage[section]["article"]:
try:
o = urlparse(href)
o = o._replace(scheme="https").geturl()
webpage[section]["article"].append(o)
except Exception as err:
self._logger.error("parsing error : {0}".format(err))
exit(1)
else:
self._logger.error("{0} : web didn't get due status code : {1}".format(self._name, page.status_code))
self._logger.debug("{0} : {1}".format(self._name, page.content))
except ConnectionError as err:
self._logger.error("{0} : Connection error : {1}".format(self._name, err))
exit(1)
except Exception as err:
self._logger.error("{0} : Exception error : {1}".format(self._name, err))
exit(1)
try:
string_webpage = json.dumps(webpage)
open("{0}/{1}.json".format(self._tmp, self._name), "wt").write(string_webpage)
except Exception as ex:
self._logger.error("{0} : Error for writing webpage : {1}".format(self._name, ex))
# Private method
#
# Create path
def _mkdirPath(self, path_dir):
if not os.path.exists(path_dir):
makedir = []
pathh = path_dir.split("/")
for i in pathh:
makedir.append(i)
repath = "/".join(makedir)
if not os.path.exists(repath):
self._logger.debug("{0} : Dossier crée : {1}".format(self._name, repath))
try:
if len(repath) > 0:
os.mkdir(repath)
except Exception as err:
self._logger.error("Directory error : {0}".format(err))
self._logger.debug("Directory error : {0} {1} {2} {3} {4}".format(err, path_dir, repath, pathh, makedir))
exit(1)
# Get Css and JS
def _getScriptCss(self, js, css):
try:
page = self._request.get(self._url)
page_url = []
if page.status_code == 200:
soup = BeautifulSoup(page.text, self._parser)
if js is True:
script = soup.find_all("script")
for anchor in script:
src = anchor.get("src", "/")
if src != "/":
try:
u = urlparse(self._url)
o = urlparse(src)
if o.netloc == "":
o = o._replace(netloc=u.netloc)
o = o._replace(scheme=u.scheme)
page_url.append(o.geturl())
except Exception as err:
self._logger.error("parsing error : {0}".format(err))
exit(1)
if css is True:
link = soup.find_all("link")
for anchor in link:
rel = anchor.get("rel")
if rel[0] == "stylesheet":
href = anchor.get("href", "/")
if href != "/":
try:
u = urlparse(self._url)
o = urlparse(href)
if o.netloc == "":
o = o._replace(netloc=u.netloc)
o = o._replace(scheme=u.scheme)
page_url.append(o.geturl())
except Exception as err:
self._logger.error("parsing error : {0}".format(err))
exit(1)
else:
self._logger.error("JS or CSS did not get due status code : {0}".format(page.status_code))
self._logger.debug(page.content)
except ConnectionError as err:
self._logger.error("Connection error : {0}".format(err))
exit(1)
except Exception as err:
self._logger.error("Exception error : {0}".format(err))
return page_url
# Get image
def _getImg(self, webpage):
page_img = []
for i in webpage:
try:
page = self._request.get(i)
if page.status_code == 200:
soup = BeautifulSoup(page.text, self._parser)
img = soup.find_all("img")
self._logger.info("{0} : image from page: {1} : ".format(self._name,i))
for anchor in img:
src = anchor.get("src", "/")
if src != "/":
if src not in page_img:
self._logger.info("{0} : image: {1} : ".format(self._name, src))
page_img.append(src)
else:
self._logger.error("{0} : Image did not get due status code : {1}".format(self._name, page.status_code))
self._logger.debug("{0} : {1}".format(self._name, page.content))
except ConnectionError as err:
self._logger.error("{0} : Connection error : {1}".format(self._name, err))
exit(1)
except Exception as err:
self._logger.error("{0} : Exception error : {1}".format(self._name, err))
return page_img
# Download page
def _downloadPage(self, webpage, backup_dir):
for i in range(0, len(webpage)):
try:
o = urlparse(webpage[i])
path_web = o.path.split("/")
filePageWeb = path_web[len(path_web)-1]
path_web.pop(len(path_web)-1)
dir_page_web = "/".join(path_web)
self._mkdirPath("{0}/{1}/{2}".format(backup_dir, o.netloc, dir_page_web))
try:
r = self._request.get(webpage[i])
if r.status_code == 200:
fileDownload = "{0}/{1}/index.html".format(backup_dir, o.netloc)
if len(dir_page_web) > 0 and len(filePageWeb) > 0:
fileDownload = "{0}/{1}{2}/{3}".format(backup_dir, o.netloc, dir_page_web, filePageWeb)
self._logger.info("{0} : {1}/{2} : {3}".format(self._name, i+1, len(webpage), fileDownload))
try:
open(fileDownload, "wb").write(r.content)
except Exception as err:
self._logger.error("file error : {0}".format(err))
exit(1)
else:
self._logger.error("Not download due status code : {0}".format(r.status_code))
self._logger.debug(r.content)
except ConnectionError as err:
self._logger.error("{0} : Connection error : {1}".format(self._name, err))
exit(1)
except Exception as err:
self._logger.error("{0} Exception error : {1}".format(self._name, err))
except Exception as err:
self._logger.error("parsing error : {0}".format(err))
exit(1)
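
A minimal usage sketch for WPExport (single thread; the URL is hypothetical and the listsmooth menu layout of a canalblog page is assumed; the default tmp directory must exist because getUrlPage writes its JSON map there):

import logging, os
from lib.WPExport import WPExport

logging.basicConfig(level=logging.INFO)
os.makedirs("/tmp/import_export_canablog", exist_ok=True)
exporter = WPExport(url="https://www.example.com", logger=logging.getLogger("export"), directory="backup")
exporter.getUrlPage(0, 1)
exporter.downloadHTML("principal", "page")
del exporter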

lib/WPImport.py, new file (+996 lines)

@@ -0,0 +1,996 @@
#!/usr/bin/python3
from bs4 import BeautifulSoup
from urllib.parse import urlparse
import requests, os, logging, re, json
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
class WPimport:
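# Imports scraped canalblog content into WordPress through the wp-json/wp/v2
# REST API (posts, pages, media, user lookup), authenticating with HTTP Basic auth.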
# Constructor
def __init__(self, name="Thread-0", basic=None, wordpress="", logger=None, parser="html.parser", ssl_wordpress=True, no_create=False, no_update=False, no_image=False, tmp="/tmp/import_export_canablog", author="", ssl_canalblog=True):
self._name = name
self._basic = basic
self._wordpress = wordpress
self._logger = logger
self._parser = parser
self._headers_json = {'Content-Type': 'application/json; charset=utf-8', 'Accept':'application/json'}
self._protocol = "https"
self._directory = "backup"
if ssl_wordpress is False:
self._protocol = "http"
self._request = requests.Session()
self._ssl_canalblog = ssl_canalblog
retries = Retry(connect=10, read=10, redirect=5,
status_forcelist=[429, 500, 502, 503, 504], backoff_factor=2)
self._request.mount('{0}://'.format(self._protocol), HTTPAdapter(max_retries=retries))
self._no_create = no_create
self._no_update = no_update
self._no_image = no_image
self._tmp = tmp
self._author = author
# Destructor
def __del__(self):
print("{0} : Import finished for {1}".format(self._name, self._wordpress))
# Public method
def setUrl(self, wordpress):
self._wordpress = wordpress
def fromUrl(self, first, second):
try:
with open("{0}/{1}.json".format(self._tmp, self._name)) as file:
webpage_content = json.loads(file.read())
self._logger.debug("{0} : size of webpage : {1}".format(self._name, len(webpage_content)))
webpage = list(webpage_content[first][second])  # iterate over a copy; the original list shrinks as pages are processed
for i in range(0, len(webpage)):
try:
r = self._request.get(webpage[i])
if r.status_code == 200:
self._logger.info("{0} : ({1}/{2}) : Page is importing : {3}".format(self._name, i+1, len(webpage), webpage[i]))
soup = BeautifulSoup(r.content, self._parser)
articlebody = soup.find_all("div", class_="articlebody")
if len(articlebody) > 0:
self._addOrUpdatePost(soup)
else:
albumbody = soup.find_all("div", class_="albumbody")
if len(albumbody) > 0:
self._addOrUpdateAlbum(soup)
else:
self._addOrUpdateFeaturedMedia(soup)
del webpage_content[first][second][0]  # pop the just-processed head so a revert resumes where it stopped (deleting index i would shift)
webpage_content = json.dumps(webpage_content)
open("{0}/{1}.json".format(self._tmp, self._name), "wt").write(webpage_content)
else:
self._logger.error("{0} : Connection error for get url {1} with status code : {2}".format(self._name, webpage[i], r.status_code))
self._logger.debug("{0} : {1}".format(self._name, r.content))
except ConnectionError as err:
self._logger.error("{0} : Connection error for get url {1} : {2}".format(self._name, webpage[i], err))
exit(1)
except IOError as err:
self._logger.error("{0} : Connection error for IO url {1} : {2}".format(self._name, webpage[i], err))
exit(1)
except Exception as err:
self._logger.error("{0} : Exception error for get url {1} : {2}".format(self._name, webpage[i], err))
except Exception as ex:
self._logger.error("{0} : Read file json from tmp : {1}".format(self._name, ex))
def fromDirectory(self, directory="", number_thread=1, max_thread=1, revert=False):
self._directory = directory
directory = "{0}/archives".format(directory)
directories = self._getDirectories([], "{0}".format(directory))
if len(directories) > 0:
files = self._getFiles(directories)
if revert is False:
self._tmpFiles(files=files, number_thread=number_thread, max_thread=max_thread)
self._fromFileTmp()
else:
self._logger.error("{0} : No files for {1}".format(self._name, directory))
def fromFile(self, files=[]):
for i in range(0, len(files)):
if os.path.exists(files[i]):
self._logger.info("{0} : ({1}/{2}) File is being processed : {3}".format(self._name, i+1, len(files), files[i]))
with open(files[i], 'r') as f:
content = f.read()
self._logger.debug("{0} : Size of article : {1}".format(self._name, len(content)))
soup = BeautifulSoup(content, self._parser)
articlebody = soup.find_all("div", class_="articlebody")
self._logger.debug("{0} : Number of article : {1}".format(self._name, len(articlebody)))
if len(articlebody) > 0:
self._addOrUpdatePost(soup)
else:
albumbody = soup.find_all("div", class_="albumbody")
if len(albumbody) > 0:
self._addOrUpdateAlbum(soup)
else:
self._addOrUpdateFeaturedMedia(soup)
# Private method
def _getAuthor(self, author):
params = {"search":author, "per_page":100}
author_id = 0
try:
self._logger.info("{0} : Search author : {1}".format(self._name, author))
page = self._request.get("{1}://{0}/wp-json/wp/v2/users".format(self._wordpress, self._protocol), auth=self._basic, headers=self._headers_json, params=params)
self._logger.debug("{0} : End Search author : {1}".format(self._name, author))
self._logger.debug("{0} : Debug requests : {1}".format(self._name, page.content))
if page.status_code == 200:
self._logger.info("{0} : Get author : {1}".format(self._name, author))
result = page.json()
for a in result:
author = a["id"]
else:
self._logger.error("{0} : Connection error with status code for get author : {1}".format(self._name, page.status_code))
self._logger.debug("{0} : {1}".format(page.content))
except ConnectionError as err:
self._logger.error("{0} : Connection error for get author : {1}".format(self._name, err))
exit(1)
except Exception as err:
self._logger.error("{0} : Exception error for get author : {1}".format(self._name, err))
return author_id
def _getInfoAlbum(self, link):
if self._ssl_canalblog:
link = link.replace("http", "https").replace("httpss", "https")
self._logger.info("{0} : Info album : {1}".format(self._name, link))
link_o = urlparse(link)
if len(link_o.netloc) > 0:
self._logger.info("{0} : get album info from web : {1}".format(self._name, link_o))
try:
response = self._request.get(link)
if response.status_code == 200:
self._logger.info("{0} : get content info from web : {1}".format(self._name, link))
page_img = response.content
except ConnectionError as err:
self._logger.error("{0} : Connection error for get album info : {1}".format(self._name, err))
exit(1)
except Exception as err:
self._logger.error("{0} : Exception error for get album info : {1}".format(self._name, err))
exit(1)
else:
self._logger.info("{0} : get album info from file : {1}".format(self._name, link_o))
if os.path.exists("{0}/..{1}".format(self._directory, link_o)):
page_img = open("{0}/..{1}".format(self._directory, link_o), "r")
soup = BeautifulSoup(page_img, self._parser)
paragraphs = soup.find("div", class_="albumbody").find_all("p")
for paragraph in paragraphs:
self._logger.info("{0} get paragraph : {1}".format(self._name, paragraph))
split_paragraph = str(paragraph).split("<br>")
self._logger.info("{0} length paragraph splitted : {1}".format(self._name, len(split_paragraph)))
if len(split_paragraph) == 1:
split_paragraph = str(paragraph).split("<br/>")
self._logger.info("{0} get paragraph splitted : {1}".format(self._name, split_paragraph))
if len(split_paragraph) > 1:
if len(split_paragraph[1].split(":")) > 1:
author = split_paragraph[1].split(":")[1].replace(" ", "").lower()
return author
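# Build a WordPress page for a photo album: upload each image as a media item,
# then emit Gutenberg wp:image blocks that reference the new media ids.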
def _addOrUpdateAlbum(self, soup):
self._logger.info("{0} : Add/Update Album".format(self._name))
albumbody = soup.find("div", class_="albumbody")
albumtitle = albumbody.find("h2").get_text()
self._logger.debug("{0} : Title of the album : {1}".format(self._name, albumtitle))
albumdesc = albumbody.find("div", class_="albumdesc").find("p")
img_a = albumbody.find_all("img")
list_img = []
page_is_exist = False
if self._no_image is False:
self._logger.debug("{0} : Number of image's tag : {1}".format(self._name, len(img_a)))
for i in img_a:
new_img = {}
href_img = i.get("src")
href_img_o = urlparse(href_img)
try:
if len(href_img_o.netloc) > 0:
img_ok = False
href_img = href_img.replace("_q", "_o")
page_img = self._request.get(href_img)
if page_img.status_code == 200:
img_ok = True
else:
if os.path.exists("{0}/..{1}".format(self._directory, href_img)):
page_img = open("{0}/..{1}".format(self._directory, href_img), "r")
img_ok = True
self._logger.debug("{0} : Status code for image {1} : {2}".format(self._name, href_img, page_img.status_code))
if img_ok is True:
media=self._addOrUpdateMedia(href_img, page_img)
new_img["id"]=media["id"]
new_img["new_src"]=media["rendered"]
list_img.append(new_img)
except ConnectionError as err:
self._logger.error("{0} : Connection error for get image : {1}".format(self._name, err))
exit(1)
except Exception as err:
self._logger.error("{0} : Exception error for get image : {1}".format(self._name, err))
exit(1)
self._logger.debug("{0} content img : {1}".format(self._name, list_img))
content_html = ""
if len(list_img) > 0:
content_html = "<!-- wp:paragraph -->\n{0}\n<!-- /wp:paragraph -->\n\n".format(albumdesc)
for i in range(0, len(list_img)):
content_html = content_html + "<!-- wp:image {\"id\":id-image,\"sizeSlug\":\"large\",\"linkDestination\":\"none\"} --><figure class=\"wp-block-image size-large\"><img src=\"src-image\" alt=\"\" class=\"wp-image-id-image\"/></figure><!-- /wp:image -->\n\n".replace("id-image", str(list_img[i]["id"])).replace("src-image", list_img[i]["new_src"])
self._logger.info("{0} : content html : {1}".format(self._name, content_html))
if len(content_html) > 0:
data = {"title":albumtitle, "content":content_html, "status":"publish"}
if len(self._author) > 0:
author = self._getAuthor(self._author)
else:
link_a = albumbody.find_all("a")
for i in link_a:
if re.search(r"/albums/", i.get("href", "/")):
href_a = i.get("href", "/")
break
author = self._getInfoAlbum(href_a)
self._logger.info("{0} : author : {1}".format(self._name, author))
author = self._getAuthor(author)
data = {"title":albumtitle, "content":content_html, "status":"publish"}
if author != 0:
data = {"title":albumtitle, "content":content_html, "status":"publish", "author":author}
self._logger.debug("{0} : data for album page : {1}".format(self._name, data))
for index in range(1,10):
params = {"search": albumtitle, "per_page":100, "page": index}
try:
self._logger.info("{0} : Search post with index {2} : {1}".format(self._name, albumtitle, index))
page = self._request.get("{1}://{0}/wp-json/wp/v2/pages".format(self._wordpress, self._protocol), auth=self._basic, params=params, headers=self._headers_json)
if page.status_code == 200:
self._logger.debug("{0} : Encoding : {1}".format(self._name, page.encoding))
page.encoding = "utf-8"
result = page.json()
if len(result) == 0:
break
self._logger.info("{0} : Number result posts : {1}".format(self._name, len(result)))
count = 0
for i in result:
title_rendered = i["title"]["rendered"]
self._logger.info("{0} : Search title pages for |{2}| : |{1}|".format(self._name, title_rendered, albumtitle))
if len(title_rendered) != len(albumtitle):
title_rendered = self._replaceCaracter(title_rendered)
self._logger.debug("{0} : Search title pages for |{2}| : |{1}|".format(self._name, title_rendered, albumtitle))
self._logger.debug("{0} : SIze of title : {1} - {2}".format(self._name, len(albumtitle), len(title_rendered)))
if title_rendered == albumtitle:
if self._no_update is False:
page_is_exist = True
post_id = i["id"]
count = count + 1
if count > 1:
self._logger.info("{0} : Page {1} is double and going to delete".format(self._name, albumtitle))
try:
params = {"force":1}
page = self._request.delete("{2}://{0}/wp-json/wp/v2/pages/{1}".format(self._wordpress, post_id, self._protocol), auth=self._basic, headers=self._headers_json, params=params)
if page.status_code == 200:
self._logger.info("{0} : Page deleted : {1}".format(self._name, albumtitle))
else:
self._logger.error("{0} : Page not updated due status code : {1}".format(self._name, page.status_code))
self._logger.debug("{0} : {1}".format(self._name, page.content))
except ConnectionError as err:
self._logger.error("{0} : Connection error for deleted page : {1}".format(self._name, err))
exit(1)
except Exception as err:
self._logger.error("{0} : Exception error for deleted page : {1}".format(self._name, err))
else:
self._logger.debug("{0} : Data for page to update : {1}".format(self._name, i))
self._logger.info("{0} : Page {1} already exist and going to update".format(self._name, albumtitle))
try:
page = self._request.post("{2}://{0}/wp-json/wp/v2/pages/{1}".format(self._wordpress, post_id, self._protocol), auth=self._basic, headers=self._headers_json, data=json.dumps(data))
if page.status_code == 200:
result = page.json()
self._logger.info("{0} : page updated : {1}".format(self._name, albumtitle))
else:
self._logger.error("{0} : page not updated due status code : {1}".format(self._name, page.status_code))
self._logger.debug("{0} : {1}".format(self._name, page.content))
except ConnectionError as err:
self._logger.error("{0} : Connection error for update page : {1}".format(self._name, err))
exit(1)
except Exception as err:
self._logger.error("{0} : Exception error for update page : {1}".format(self._name, err))
elif page.status_code == 400:
self._logger.error("{0} : Search page returned a bad request status code : {1}".format(self._name, page.status_code))
self._logger.debug("{0} : {1}".format(self._name, page.content))
break
else:
self._logger.error("{0} : Connection for update page error with status code : {1}".format(self._name, page.status_code))
self._logger.debug("{0} : {1}".format(self._name, page.content))
except ConnectionError as err:
self._logger.error("{0} : Connection error for search page : {1}".format(self._name, err))
exit(1)
except Exception as err:
self._logger.error("{0} : Exception error for search page : {1}".format(self._name, err))
if page_is_exist is False and self._no_create is False:
try:
self._logger.info("{0} : Creating page : {1}".format(self._name, data["title"]))
page = self._request.post("{1}://{0}/wp-json/wp/v2/pages".format(self._wordpress, self._protocol), auth=self._basic, headers=self._headers_json, data=json.dumps(data))
if page.status_code == 201:
result = page.json()
self._logger.info("{0} : page added : {1}".format(self._name, result["title"]["raw"]))
else:
self._logger.error("{0} : page not added due status code : {1}".format(self._name, r.status_code))
self._logger.debug("{0} : {1}".format(self._name, r.content))
except ConnectionError as err:
self._logger.error("{0} : Connection error for create page : {1}".format(self._name, err))
exit(1)
except Exception as err:
self._logger.error("{0} : Exception error for create page : {1}".format(self._name, err))
def _fromFileTmp(self):
try:
with open("{0}/{1}.json".format(self._tmp, self._name)) as file:
files = json.loads(file.read())
self._logger.debug("{0} : size of webpage : {1}".format(self._name, len(files)))
for i in range(0, len(files)):
if os.path.exists(files[i]):
self._logger.info("{0} : ({1}/{2}) File is being processed : {3}".format(self._name, i+1, len(files), files[i]))
with open(files[i], 'r') as f:
content = f.read()
self._logger.debug("{0} : Size of article : {1}".format(self._name, len(content)))
soup = BeautifulSoup(content, self._parser)
articlebody = soup.find_all("div", class_="articlebody")
self._logger.debug("{0} : Number of article : {1}".format(self._name, len(articlebody)))
if len(articlebody) > 0:
self._addOrUpdatePost(soup)
else:
self._addOrUpdateFeaturedMedia(soup)
except Exception as ex:
self._logger.error("{0} : Read file json from tmp : {1}".format(self._name, ex))
def _tmpFiles(self, files=[], number_thread=1, max_thread=1):
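# Partition the file list across threads: thread n (0-based) handles
# files[divFiles*n : divFiles*(n+1)]; remainder files from the integer
# division are not assigned.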
divFiles = int(len(files) / max_thread)
currentRangeFiles = int(divFiles * (number_thread+1))
firstRange = int(currentRangeFiles - divFiles)
self._logger.debug("{0} : index : {1}".format(self._name,number_thread))
self._logger.debug("{0} : first range : {1}".format(self._name,firstRange))
self._logger.debug("{0} : last range : {1}".format(self._name,currentRangeFiles))
webpage = []
for i in range(firstRange, currentRangeFiles):
webpage.append(files[i])
try:
string_webpage = json.dumps(webpage)
open("{0}/{1}.json".format(self._tmp, self._name), "wt").write(string_webpage)
except Exception as ex:
self._logger.error("{0} : Error for writing webpage : {1}".format(self._name, ex))
## Replace characters
def _replaceCaracter(self, title_rendered):
list_replace = {'&rsquo;': "'", '&#8211;': '-', '&#8230;': '...', '« ': '"', ' »': '"', '&#8217;': "'", '&lsquo;': "'"}
for old, new in list_replace.items():
title_rendered = title_rendered.replace(old, new)
return title_rendered
## Remove leading and trailing space
def _removeSpace(self, title):
if len(title) > 0 and title[len(title)-1] == " ":
title = title[:-1]
if len(title) > 0 and title[0] == " ":
title = title[1:]
return title
## Get all files
def _getFiles(self, item):
files = []
for i in item:
for j in os.listdir(i):
if os.path.isfile("{0}/{1}".format(i, j)):
files.append("{0}/{1}".format(i, j))
return files
## Get directories
def _getDirectories(self, subdirectory, item):
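# Depth-first walk: append every subdirectory of item to the shared list
# and recurse into each one, returning the accumulated list.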
sub = subdirectory
for i in os.listdir(item):
if os.path.isdir("{0}/{1}".format(item, i)):
sub.append("{0}/{1}".format(item, i))
subdirectory = self._getDirectories(sub, "{0}/{1}".format(item, i))
return subdirectory
def _linkFeaturedMedia(self, img_src, h2, result):
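# Fetch the image, look it up by file name in the WordPress media library,
# and set it as featured_media on the matching post; if the library has no
# match, upload the full-size image first, then retry.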
try:
page = self._request.get(img_src)
if page.status_code == 200:
name_img = img_src.replace("_q", "")
name_img = name_img.split("/")[len(name_img.split("/"))-1]
params = {"search": name_img}
try:
page = self._request.get("{1}://{0}/wp-json/wp/v2/media".format(self._wordpress, self._protocol), auth=self._basic, params=params)
if page.status_code == 200:
res = page.json()
if len(res) > 0:
id_media = res[0]["id"]
data = {"featured_media": id_media}
try:
r = self._request.post("{2}://{0}/wp-json/wp/v2/posts/{1}".format(self._wordpress, result[0]["id"], self._protocol), auth=self._basic, headers=self._headers_json, data=json.dumps(data))
if r.status_code == 200:
self._logger.info("{0} : Add media featured : {1}".format(self._name, r.json()["title"]["raw"]))
else:
self._logger.error("{0} : Connection error with status code for featured media : {1}".format(self._name, r.status_code))
self._logger.debug("{0} : {1}".format(self._name, r.content))
except ConnectionError as err:
self._logger.error("{0} : Connection error for post media featured : {1}".format(self._name, err))
exit(1)
except Exception as err:
self._logger.error("{0} : Exception error for post media featured : {1}".format(self._name, err))
else:
self._logger.info("{0} : No media found for {1}".format(self._name, h2))
try:
page = self._request.get(img_src.replace("_q", ""))
if page.status_code == 200:
self._logger.info("{0} : Get image for media featured : {1}".format(self._name, img_src.replace("_q", "")))
media = self._addOrUpdateMedia(img_src.replace("_q", ""), page)
if media["id"] != "":
self._linkFeaturedMedia(img_src.replace("_q", ""), h2, result)
else:
self._logger.error("{0} : Connection error with status code for get image for featured media : {1}".format(self._name, page.status_code))
self._logger.debug("{0} : {1}".format(self._name, page.content))
except ConnectionError as err:
self._logger.error("{0} : Connection error for get to image for featured media : {1}".format(self._name, err))
exit(1)
except Exception as err:
self._logger.error("{0} : Exception error for get to image media featured : {1}".format(self._name, err))
else:
self._logger.error("{0} : Connection error with status code for search featured media: {1}".format(self._name, page.status_code))
self._logger.debug("{0} : {1}".format(self._name, page.content))
except ConnectionError as err:
self._logger.error("{0} : Connection error search featured media : {1}".format(self._name, err))
exit(1)
except Exception as err:
self._logger.error("{0} : Exception error search featured media : {1}".format(self._name, err))
else:
self._logger.error("{0} : Connection error for get featured media with status code : {1}".format(self._name, page.status_code))
self._logger.debug("{0} : {1}".format(self._name, page.content))
except ConnectionError as err:
self._logger.error("{0} : Connection error for get featured media : {1}".format(self._name, err))
exit(1)
except Exception as err:
self._logger.error("{0} : Exception error for get featured media : {1}".format(self._name, err))
## Add or update featured media
def _addOrUpdateFeaturedMedia(self, soup):
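# For each post block (div with data-edittype="post") in the page, find the
# matching WordPress post via /wp-json/wp/v2/search and link its first
# image as featured media.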
item_div = soup.find_all("div", {"data-edittype": "post"})
for i in item_div:
h2 = i.find_all("h2")[0].text
params = {"search":h2, "type":"post"}
try:
page = self._request.get("{1}://{0}/wp-json/wp/v2/search".format(self._wordpress, self._protocol), auth=self._basic, params=params)
if page.status_code == 200:
result = page.json()
if len(result) > 0:
if h2 == self._replaceCaracter(result[0]["title"]):
img = i.find_all("img")
if len(img) > 0:
img_src = img[0].get("src")
self._linkFeaturedMedia(img_src, h2, result)
else:
self._logger.error("{0} : Connection error with status code for featured media : {1}".format(self._name, page.status_code))
self._logger.debug("{0} : {1}".format(self._name, page.content))
except ConnectionError as err:
self._logger.error("{0} : Connection error : {1}".format(self._name, err))
exit(1)
except Exception as err:
self._logger.error("{0} : Connection error : {1}".format(self._name, err))
## Association image to post
def _linkImgPost(self, title, list_img, post_id):
for i in list_img:
data = {"post": post_id}
try:
r = self._request.post("{2}://{0}/wp-json/wp/v2/media/{1}".format(self._wordpress, i["id"], self._protocol), auth=self._basic, data=data)
if r.status_code == 200:
self._logger.info("{0} : Link image to post {1}".format(self._name, title))
else:
self._logger.error("{0} Connection error with status code for link image to post : {1}".format(self._name, r.status_code))
self._logger.debug("{0} : {1}".format(self._name, r.content))
except ConnectionError as err:
self._logger.error("{0} : Connection error for link image to post : {1}".format(self._name, err))
exit(1)
except Exception as err:
self._logger.error("{0} : Exception error for link image to post : {1}".format(self._name, err))
## Add or update img
def _addOrUpdateMedia(self, href_img, page):
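# Look the image up by file name in the media library, delete any existing
# copy, then re-upload the raw bytes with a Content-Disposition header so
# WordPress keeps the original file name.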
media_authorized = ["png", "jpg", "jpeg", "svg", "gif"]
media = {"id":"", "rendered":""}
split_fileimg = href_img.split("/")
img_name = split_fileimg[len(split_fileimg)-1]
img_type_file = img_name.split(".")[len(img_name.split("."))-1]
is_img = True
if img_type_file not in media_authorized:
self._logger.error("{0} : Element {1} is not image".format(self._name,img_name))
is_img = False
if is_img is True:
self._logger.debug("{0} : Search for image {1} with URL {2}".format(self._name, img_name, "{1}://{0}/wp-json/wp/v2/media".format(self._wordpress, self._protocol)))
params = { "search": img_name}
try:
r = self._request.get("{1}://{0}/wp-json/wp/v2/media".format(self._wordpress, self._protocol), auth=self._basic, params=params)
self._logger.debug("{0} : Search for image {1} and his status code {2}".format(self._name, img_name, r.status_code))
if r.status_code == 200:
res = r.json()
self._logger.debug("{0} : Number of image in search : {1}".format(self._name, len(res)))
if len(res) > 0:
params = {"force":1}
try:
r = self._request.delete("{2}://{0}/wp-json/wp/v2/media/{1}".format(self._wordpress, res[0]["id"], self._protocol), auth=self._basic, params=params)
if r.status_code == 200:
self._logger.info("{0} : Image removed {1}".format(self._name, img_name))
else:
self._logger.error("{0} : Image {1} not removed due status code : {2}".format(self._name, img_name, r.status_code))
self._logger.debug("{0} : {1}".format(self._name, r.content))
except ConnectionError as err:
self._logger.error("{0} : Connection error for delete image : {1}".format(self._name, err))
exit(1)
except Exception as err:
self._logger.error("{0} : Exception error for delete image : {1}".format(self._name, err))
# page may be a requests.Response or a local file object opened in binary mode
data = page.content if hasattr(page, "content") else page.read()
img_type = "image/{0}".format(img_type_file)
if img_type_file == "jpg":
img_type = "image/jpeg"
headers={ 'Content-Type': img_type,'Content-Disposition' : 'attachment; filename={0}'.format(img_name)}
try:
r = self._request.post("{1}://{0}/wp-json/wp/v2/media".format(self._wordpress, self._protocol), auth=self._basic, headers=headers, data=data)
if r.status_code == 201:
self._logger.info("{0} : Image added {1}".format(self._name, img_name))
res = r.json()
media["id"] = res["id"]
media["rendered"] = res["guid"]["rendered"]
else:
self._logger.error("{0} : Image {1}.{2} not added due status code : {3}".format(self._name, img_name, img_type, r.status_code))
self._logger.debug("{0} : {1}".format(self._name, r.content))
except ConnectionError as err:
self._logger.error("{0} : Connection error for add image : {1}".format(self._name, err))
exit(1)
except Exception as err:
self._logger.error("{0} : Exception error for add image : {1}".format(self._name, err))
exit(1)
else:
self._logger.error("{0} : Connection error for search image with status code : {1}".format(self._name, r.status_code))
self._logger.debug("{0} : {1}".format(self._name, r.content))
except ConnectionError as err:
self._logger.error("{0} : Connection error for search media : {1}".format(self._name, err))
exit(1)
except Exception as err:
self._logger.error("{0} : Exception error for search media : {1}".format(self._name, err))
return media
## Add or update comment
def _addOrUpdateComment(self, post, comment, title):
for i in comment:
try:
params = {"post": post, "author_name":i["author"], "date":i["date"]}
page = self._request.get("{1}://{0}/wp-json/wp/v2/comments".format(self._wordpress, self._protocol), auth=self._basic, params=params)
if page.status_code == 200:
result = page.json()
for j in result:
try:
params = {"force":1}
page = self._request.delete("{2}://{0}/wp-json/wp/v2/comments/{1}".format(self._wordpress, j["id"], self._protocol), params=params, auth=self._basic)
if page.status_code == 200:
self._logger.info("{0} : Comment deleted for {1}".format(self._name, title))
self._logger.debug("{0} : Comment deleted : {1}".format(self._name, j))
else:
self._logger.error("{0} : Comment not deleted for {1} due status code : {2}".format(self._name, title, page.status_code))
self._logger.debug("{0} : {1}".format(self._name, page.content))
except ConnectionError as err:
self._logger.error("{0} : Connection error for delete comment : {1}".format(self._name, err))
exit(1)
except Exception as err:
self._logger.error("{0} : Exception error for delete comment : {1}".format(self._name, err))
else:
self._logger.error("{0} : Comment not listed for {1} due status code : {2}".format(self._name, title, page.status_code))
self._logger.debug("{0} : {1}".format(self._name, page.content))
except ConnectionError as err:
self._logger.error("{0} : Connection error for search comment : {1}".format(self._name, err))
exit(1)
except Exception as err:
self._logger.error("{0} : Exception error for search comment : {1}".format(self._name, err))
for i in comment:
data = {"post": post, "content": i["content"], "date": i["date"], "author_name": i["author"], "status": "approved"}
if i["parent_id"] != -1:
parent_id = int(i["parent_id"])
params = {"post": post, "author_name":comment[parent_id]["author"], "date":comment[parent_id]["date"]}
try:
page = self._request.get("{1}://{0}/wp-json/wp/v2/comments".format(self._wordpress, self._protocol), auth=self._basic, params=params)
if page.status_code == 200:
result = page.json()
if len(result) > 0:
data["parent"]=result[0]["id"]
else:
self._logger.error("{0} : Connection error for parent comment with status code : {1}".format(self._name, page.status_code))
self._logger.debug("{0} : {1}".format(self._name, page.content))
except ConnectionError as err:
self._logger.error("{0} : Connection error for parent comment : {1}".format(self._name, err))
exit(1)
except Exception as err:
self._logger.error("{0} : Exception error for parent comment : {1}".format(self._name, err))
try:
page = self._request.post("{1}://{0}/wp-json/wp/v2/comments".format(self._wordpress, self._protocol), auth=self._basic, data=data)
if page.status_code == 201:
self._logger.info("{0} : Comment added for {1}".format(self._name, title))
self._logger.debug("{0} : Data : {1}".format(self._name, data))
else:
self._logger.error("{0} : Comment not added for {1} due status code : {2}".format(self._name, title, page.status_code))
self._logger.debug("{0} : {1}".format(self._name, page.content))
except ConnectionError as err:
self._logger.error("{0} : Connection error for add comment : {1}".format(self._name, err))
exit(1)
except Exception as err:
self._logger.error("{0} : Exception error for add comment : {1}".format(self._name, err))
exit(1)
## Check class name
def _hasClassName(self, tag, className):
for i in tag["class"]:
if i == className:
return True
return False
## Get class name
def _getClassName(self, tag, className):
for i in tag["class"]:
if re.match(className, i):
return i
return ""
## Get all comments
def _getComment(self, comment):
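# Canalblog marks nesting with classes level-1, level-2, ...; a comment's
# parent is the nearest earlier comment one level shallower.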
comment_post = []
for i in range(0, len(comment)):
comment_div = comment[i].find("div", class_="comment_item")
comment_item = comment_div.text.split("\n")
footer = comment_div.find_all("div", class_="itemfooter")
comment_author = footer[0].text.split(",")[0].replace("Posté par ", "")
comment_date = footer[0].find_all("abbr")[0].get("title")
comment_content = "<p>"
for j in range(0, len(comment_item)-2):
if len(comment_item[j]) > 0:
comment_content = comment_content + comment_item[j] + "<br />"
comment_content = comment_content + "</p>"
parent = -1
if self._hasClassName(comment[i], "level-1") is False:
block = False
className = self._getClassName(comment[i], "level-").split("-")
level = 1
if len(className) > 0:
level = int(className[1])
for j in range(i-1, -1, -1):
if block is False:
levelName = "level-{0}".format(level - 1)
if self._hasClassName(comment[j], levelName) is True:
parent = j
block = True
comment_post.append({"author": comment_author, "date": comment_date, "content": comment_content, "parent_id":parent})
return comment_post
## Add or Update post
def _addOrUpdatePost(self, soup):
tags = []
month = {"janvier":"01", "février": "02", "mars": "03", "avril":"04", "mai": "05", "juin": "06", "juillet": "07", "août": "08", "septembre": "09", "octobre": "10", "novembre": "11", "décembre": "12"}
liste = ["categories", "tags"]
elements = {}
element = {}
listelement = {}
for i in liste:
element[i] = []
listelement[i] = []
articletitle = soup.find_all("h2", class_="articletitle")
self._logger.debug("{0} : Title of the article : {1}".format(self._name, articletitle))
articlebody = soup.find_all("div", class_="articlebody")
articledate = soup.find_all("span", class_="articledate")
articleacreator = soup.find_all("span", class_="articlecreator")
dateheader = soup.find_all("div", class_="dateheader")
itemfooter = soup.find_all("div", class_="itemfooter")
comment = soup.find_all("li", class_="comment")
img_a = articlebody[0].find_all("a", {"target": "_blank"})
self._logger.debug("{0} : Number of image's link : {1}".format(self._name, len(img_a)))
list_img = []
if self._no_image is False:
for i in img_a:
new_img = {}
img = i.find_all("img")
self._logger.debug("{0} : Number of image's tag : {1}".format(self._name, len(img)))
if len(img) > 0:
href_a = i.get("href")
href_img = img[0].get("src")
href_a_o = urlparse(href_a)
href_img_o = urlparse(href_img)
new_img["old_src"]=href_img
new_img["old_href"]=href_a
try:
if len(href_img_o.netloc) > 0:
img_ok = False
page_img = self._request.get(href_img)
if page_img.status_code == 404:
href_img = href_a
try:
page_img = self._request.get(href_a)
if page_img.status_code == 200:
img_ok = True
except ConnectionError as err:
self._logger.error("{0} : Connection error for get image : {1}".format(self._name, err))
exit(1)
except Exception as err:
self._logger.error("{0} : Exception error for get image : {1}".format(self._name, err))
exit(1)
else:
if os.path.exists("{0}/..{1}".format(self._directory, href_img)):
page_img = open("{0}/..{1}".format(self._directory, href_img), "rb")
img_ok = True
else:
if os.path.exists("{0}/..{1}".format(self._directory, href_a)):
page_img = open("{0}/..{1}".format(self._directory, href_a), "rb")
img_ok = True
if hasattr(page_img, "status_code"):
self._logger.debug("{0} : Status code for image {1} : {2}".format(self._name, href_img, page_img.status_code))
if img_ok is True:
media=self._addOrUpdateMedia(href_img, page_img)
new_img["id"]=media["id"]
new_img["new_src"]=media["rendered"]
list_img.append(new_img)
if href_img != href_a:
media=self._addOrUpdateMedia(href_a, page_img)
new_img["id"]=media["id"]
new_img["new_src"]=media["rendered"]
list_img.append(new_img)
if hasattr(page_img, "status_code") and page_img.status_code not in [200, 404]:
self._logger.error("{0} : Connection error with status code for get image : {1}".format(self._name, page_img.status_code))
self._logger.debug("{0} : {1}".format(self._name, page_img.content))
except ConnectionError as err:
self._logger.error("{0} : Connection error for get image : {1}".format(self._name, err))
exit(1)
except Exception as err:
self._logger.error("{0} : Exception error for get image : {1}".format(self._name, err))
exit(1)
self._logger.debug("{0} : Number of image : {1}".format(self._name, len(list_img)))
comment_post = self._getComment(comment)
a = itemfooter[0].find_all("a", {"rel": True})
for i in a:
rel = i.get("rel")
if rel[0] == 'tag':
href = i.get("href")
if re.search(r'/tag/', href):
element["tags"].append(i.text)
if re.search(r'/archives/', href):
element["categories"].append(i.text)
for i in liste:
for j in element[i]:
element_exist = False
title_element = self._removeSpace(j)
for index in range(1,10):
self._logger.info("{0} : search {1} with index {2} : {3}".format(self._name, i, index, title_element))
try:
params = {"search":title_element, "per_page":"100", "page":index}
page = self._request.get("{2}://{0}/wp-json/wp/v2/{1}".format(self._wordpress, i, self._protocol), auth=self._basic, params=params)
if page.status_code == 200:
result = page.json()
self._logger.debug("{0} : content {3} {2} : {1}".format(self._name, result, title_element, i))
if len(result) > 0:
for k in result:
title_rendered = k["name"]
self._logger.debug("{0} : content {2} : {1}".format(self._name, title_rendered, i))
self._logger.debug("{0} : size of content {3} : {2} - {1}".format(self._name, len(title_rendered), len(title_element), i))
if len(title_element) != len(title_rendered):
title_rendered = self._replaceCaracter(title_rendered)
if title_element == title_rendered:
self._logger.info("{0} : {1} found : {2}".format(self._name, i, title_rendered))
element_exist = True
listelement[i].append(k["id"])
else:
break
elif page.status_code == 400:
self._logger.error("{0} : {1} not found due status code : {2}".format(self._name, i, page.status_code))
self._logger.debug("{0} : {1}".format(self._name, page.content))
break
else:
self._logger.error("{0} : {1} not found due status code : {2}".format(self._name, i, page.status_code))
self._logger.debug("{0} : {1}".format(self._name, page.content))
except ConnectionError as err:
self._logger.error("{0} : Connection error for {1} : {2}".format(self._name, i, err))
exit(1)
except Exception as err:
self._logger.error("{0} : Exception error for {1} : {2}".format(self._name, i, err))
self._logger.debug("{0} : Element {3} {2} is {1}".format(self._name, element_exist, title_element, i))
if element_exist == False:
data = {"name": title_element}
self._logger.info("{0} : Create {1} : {2}".format(self._name, i, title_element))
self._logger.debug("{0} : Data : {1}".format(self._name, data))
try:
page = self._request.post("{2}://{0}/wp-json/wp/v2/{1}".format(self._wordpress, i, self._protocol), auth=self._basic, headers=self._headers_json, data=json.dumps(data))
if page.status_code == 201:
self._logger.info("{0} : {1} created : {2}".format(self._name, i, j))
result = page.json()
listelement[i].append(result["id"])
else:
self._logger.error("{0} : {1} not added due status code : {2}".format(self._name, i, page.status_code))
self._logger.debug("{0} : {1}".format(self._name, page.content))
except ConnectionError as err:
self._logger.error("{0} : Connection error for post {1} : {2}".format(self._name, i, err))
exit(1)
except Exception as err:
self._logger.error("{0} : Exception error for post {1} : {2}".format(self._name, i, err))
title = articletitle[0].text
author = articleacreator[0].text.lower()
if len(self._author) > 0:
author = self._author
body = articlebody[0].find_all("p")
bodyhtml = "<p>"
for i in body:
if len(i.text) == 1:
bodyhtml = bodyhtml + "<br />"
else:
bodyhtml = bodyhtml + str(i).replace("<p>", "").replace("</p>", "").replace("<br>", "<br />") + "<br />"
bodyhtml = bodyhtml + "</p>"
for i in list_img:
o = urlparse(i["new_src"])
bodyhtml = bodyhtml.replace(i["old_href"], o.path)
bodyhtml = bodyhtml.replace(i["old_src"], o.path)
hour = articledate[0].text
time = ""
if len(dateheader) > 0:
time = dateheader[0].text.split(" ")
self._logger.debug("{0} : Title post : |{1}|".format(self._name, title))
title = self._removeSpace(title)
self._logger.debug("{0} : Rendered Title post : |{1}|".format(self._name, title))
data = {"title":title, "content":bodyhtml, "status":"publish", "tags": listelement["tags"], "categories": listelement["categories"]}
if len(dateheader) > 0:
data = {"title":title, "content":bodyhtml, "status":"publish", "date": "{0}-{1}-{2}T{3}:00".format(time[2],month[time[1]],time[0], hour), "tags": listelement["tags"], "categories": listelement["categories"]}
self._logger.debug("{0} : Data for post : |{1}| : {2}" .format(self._name, title, data))
data["author"] = self._getAuthor(author)
page_is_exist = False
for index in range(1,10):
params = {"search": title, "per_page":100, "page": index}
try:
self._logger.info("{0} : Search post with index {2} : {1}".format(self._name, title, index))
page = self._request.get("{1}://{0}/wp-json/wp/v2/posts".format(self._wordpress, self._protocol), auth=self._basic, params=params, headers=self._headers_json)
if page.status_code == 200:
self._logger.debug("{0} : Encoding : {1}".format(self._name, page.encoding))
page.encoding = "utf-8"
result = page.json()
if len(result) == 0:
break
self._logger.info("{0} : Number result posts : {1}".format(self._name, len(result)))
count = 0
for i in result:
title_rendered = i["title"]["rendered"]
self._logger.info("{0} : Search title posts for |{2}| : |{1}|".format(self._name, title_rendered, title))
if len(title_rendered) != len(title):
title_rendered = self._replaceCaracter(title_rendered)
self._logger.debug("{0} : Search title posts for |{2}| : |{1}|".format(self._name, title_rendered, title))
self._logger.debug("{0} : SIze of title : {1} - {2}".format(self._name, len(title), len(title_rendered)))
if title_rendered == title:
if self._no_update is False:
page_is_exist = True
post_id = i["id"]
count = count + 1
if count > 1:
self._logger.info("{0} : Page {1} is double and going to delete".format(self._name, title))
try:
params = {"force":1}
page = self._request.delete("{2}://{0}/wp-json/wp/v2/posts/{1}".format(self._wordpress, post_id, self._protocol), auth=self._basic, headers=self._headers_json, params=params)
if page.status_code == 200:
self._logger.info("{0} : Post deleted : {1}".format(self._name, title))
else:
self._logger.error("{0} : Post not updated due status code : {1}".format(self._name, page.status_code))
self._logger.debug("{0} : {1}".format(self._name, page.content))
except ConnectionError as err:
self._logger.error("{0} : Connection error for deleted post : {1}".format(self._name, err))
exit(1)
except Exception as err:
self._logger.error("{0} : Exception error for deleted post : {1}".format(self._name, err))
else:
self._logger.debug("{0} : Data for post to update : {1}".format(self._name, i))
self._logger.info("{0} : Page {1} already exist and going to update".format(self._name, title))
try:
page = self._request.post("{2}://{0}/wp-json/wp/v2/posts/{1}".format(self._wordpress, post_id, self._protocol), auth=self._basic, headers=self._headers_json, data=json.dumps(data))
if page.status_code == 200:
result = page.json()
self._logger.info("{0} : Post updated : {1}".format(self._name, title))
self._addOrUpdateComment(result["id"], comment_post, result["title"]["raw"])
self._linkImgPost(result["title"]["raw"], list_img, result["id"])
else:
self._logger.error("{0} : Post not updated due status code : {1}".format(self._name, page.status_code))
self._logger.debug("{0} : {1}".format(self._name, page.content))
except ConnectionError as err:
self._logger.error("{0} : Connection error for update post : {1}".format(self._name, err))
exit(1)
except Exception as err:
self._logger.error("{0} : Exception error for update post : {1}".format(self._name, err))
elif page.status_code == 400:
self._logger.error("{0} : Search post returned a bad request status code : {1}".format(self._name, page.status_code))
self._logger.debug("{0} : {1}".format(self._name, page.content))
break
else:
self._logger.error("{0} : Connection for update post error with status code : {1}".format(self._name, page.status_code))
self._logger.debug("{0} : {1}".format(self._name, page.content))
except ConnectionError as err:
self._logger.error("{0} : Connection error for search post : {1}".format(self._name, err))
exit(1)
except Exception as err:
self._logger.error("{0} : Exception error for search post : {1}".format(self._name, err))
if page_is_exist is False and self._no_create is False:
try:
self._logger.info("{0} : Creating posts : {1}".format(self._name, data["title"]))
page = self._request.post("{1}://{0}/wp-json/wp/v2/posts".format(self._wordpress, self._protocol), auth=self._basic, headers=self._headers_json, data=json.dumps(data))
if page.status_code == 201:
result = page.json()
self._logger.info("{0} : Post added : {1}".format(self._name, result["title"]["raw"]))
self._addOrUpdateComment(result["id"], comment_post, result["title"]["raw"])
self._linkImgPost(result["title"]["raw"], list_img, result["id"])
else:
self._logger.error("{0} : Post not added due status code : {1}".format(self._name, r.status_code))
self._logger.debug("{0} : {1}".format(self._name, r.content))
except ConnectionError as err:
self._logger.error("{0} : Connection error for create post : {1}".format(self._name, err))
exit(1)
except Exception as err:
self._logger.error("{0} : Exception error for create post : {1}".format(self._name, err))

lib/WPMenu.py Normal file
@@ -0,0 +1,394 @@
#!/usr/bin/python3
from bs4 import BeautifulSoup
from urllib.parse import urlparse
import requests, os, logging, re, json
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
class WPMenu:
# Constructor
def __init__(self, name="Thread-0", basic=None, canalblog="", wordpress="", logger=None, parser="html.parser", ssl_canalblog=True, ssl_wordpress=True):
self._name = name
self._basic = basic
self._canalblog = canalblog
self._wordpress = wordpress
self._logger = logger
self._parser = parser
self._headers_json = {'Content-Type': 'application/json; charset=utf-8', 'Accept':'application/json'}
self._protocol_wordpress = "https"
self._protocol_canalblog = "https"
self._directory = "backup"
if ssl_wordpress is False:
self._protocol_wordpress = "http"
if ssl_canalblog is False:
self._protocol_canalblog = "http"
self._request_canalblog = requests.Session()
self._request_wordpress = requests.Session()
retries = Retry(connect=10, read=10, redirect=5,
status_forcelist=[429, 500, 502, 503, 504], backoff_factor=2)
self._request_canalblog.mount('{0}://'.format(self._protocol_canalblog), HTTPAdapter(max_retries=retries))
self._request_wordpress.mount('{0}://'.format(self._protocol_wordpress), HTTPAdapter(max_retries=retries))
# Destructor
def __del__(self):
print("{0} : Import finished for {1}".format(self._name, self._wordpress))
# Public method
## From file
def fromFile(self, files):
if os.path.exists(files):
with open(files, 'r') as f:
self._logger.info("{0} : File is being processed : {1}".format(self._name, files))
content = f.read()
self._menu(content)
else:
self._logger.error("{0} : File isn't exist : {1}".format(self._name, files))
## Get from URL
def fromUrl(self, canalblog):
self._canalblog = canalblog
try:
o = urlparse(canalblog)
o = o._replace(scheme=self._protocol_canalblog)
i = o.geturl().replace(":///", "://")
page = self._request_canalblog.get(i)
if page.status_code == 200:
self._logger.info("{0} : Page web is being processed : {1}".format(self._name, i))
self._menu(page.content)
else:
self._logger.error("{0} : index didn't get due status code : {1}".format(self._name, page.status_code))
self._logger.debug("{0} : {1}".format(self._name, page.content))
except ConnectionError as err:
self._logger.error("{0} : Connection error for get url {1} : {2}".format(self._name, canalblog, err))
exit(1)
except Exception as err:
self._logger.error("{0} : Exception error for get url {1} : {2}".format(self._name, canalblog, err))
## Replace characters
def _replaceCaracter(self, title_rendered):
list_replace = {'&rsquo;': "'", '&#8211;': '-', '&#8230;': '...', '« ': '"', ' »': '"', '&#8217;': "'", '&lsquo;': "'"}
for old, new in list_replace.items():
title_rendered = title_rendered.replace(old, new)
return title_rendered
def _getIdfromTitlePost(self, content):
idMenu = {"id":0, "type":"", "link":""}
soup = BeautifulSoup(content, self._parser)
articletitle = soup.find_all("h2", class_="articletitle")
if len(articletitle) > 0:
articletitle = articletitle[0].get_text()
search = "posts"
post_type = "post"
if len(articletitle) == 0:
articletitle = soup.find_all("div", class_="albumbody")
if len(articletitle) > 0:
articletitle = articletitle[0].find("h2").get_text()
search = "pages"
post_type = "page"
exist = False
for index in range(1,10):
if exist is False:
params = {"search":articletitle, "per_page":100, "page":index}
try:
self._logger.debug("{0} : Get Url for {3} : {1} {2}".format(self._name, "{1}://{0}/wp-json/wp/v2/{2}".format(self._wordpress, self._protocol_wordpress, search), params, search))
page = self._request_wordpress.get("{1}://{0}/wp-json/wp/v2/{2}".format(self._wordpress, self._protocol_wordpress, search), auth=self._basic, params=params)
if page.status_code == 200:
result = page.json()
self._logger.info("{0} : Get content {2} : {1}".format(self._name, len(result), search))
if len(result) > 0:
for i in result:
title_rendered = i["title"]["rendered"]
if len(articletitle) != len(title_rendered):
title_rendered = self._replaceCaracter(title_rendered)
self._logger.debug("{0} : comparaison debug {1} {2}".format(self._name, articletitle, title_rendered))
if articletitle == title_rendered:
self._logger.debug("{0} : get {2} id : {1}".format(self._name, i, search))
idMenu = {"id":i["id"], "type":post_type, "link": i["link"]}
exist = True
else:
self._logger.debug("{0} : {2} {1}".format(self._name, result, len(result)))
break
elif page.status_code == 400:
self._logger.debug("{0} : {2} {1}".format(self._name, page.content, page.status_code))
break
else:
self._logger.error("{0} : Post didn't get due status code : {1}".format(self._name, page.status_code))
self._logger.debug("{0} : {1}".format(self._name, page.content))
except ConnectionError as err:
self._logger.error("{0} : Connection error for get content : {1}".format(self._name, err))
exit(1)
except Exception as err:
self._logger.error("{0} : Exception error for get content : {1} ".format(self._name, err))
return idMenu
def _getIdFromPost(self, href):
idMenu = {"id":0, "type":"", "link":""}
o = urlparse(href)
if len(o.netloc) > 0:
try:
page = self._request_canalblog.get(href)
if page.status_code == 200:
self._logger.info("{0} : Get content : {1}".format(self._name, href))
idMenu = self._getIdfromTitlePost(page.content)
else:
self._logger.error("{0} : {2} didn't get due status code : {1}".format(self._name, page.status_code, href))
self._logger.debug("{0} : {1}".format(self._name, page.content))
except ConnectionError as err:
self._logger.error("{0} : Connection error for get url {1} : {2}".format(self._name, href, err))
exit(1)
except Exception as err:
self._logger.error("{0} : Exception error for get url {1} : {2}".format(self._name, href, err))
else:
if os.path.exists("{0}/..{1}".format(self._directory, o.path)):
try:
content = open("{0}/..{1}".format(self._directory, o.path), "r")
idMenu = self._getIdfromTitlePost(content)
except Exception as err:
self._logger.error("{0} : Exception error for get file content {1} : {2}".format(self._name, href, err))
return idMenu
def _getIdFromReverse(self, title, href):
self._logger.info("{0} : get title {1} from href {2}".format(self._name, title, href))
idMenu = {"id":0, "type":"", "link":""}
if href != "#":
title = href[::-1]
second_title = title.split("/")[2]
second_title = second_title[::-1]
link = title.split("/")[0]
link = link[::-1]
title = title.split("/")[1]
title = title[::-1]
self._logger.info("{0} link {1} title {2}".format(self._name, link, title))
if link == "index.html":
if second_title == "albums":
idMenu = self._getIdFromPost(href)
else:
idMenu = self._getId(title)
else:
idMenu = self._getIdFromPost(href)
return idMenu
def _getId(self, title):
idMenu = {"id": 0, "type":"", "link":""}
exist = False
for i in ["categories", "tags"]:
typeId = "category"
if i == "tags":
typeId = "tag"
for index in range(1,10):
try:
params = {"search":title, "per_page":"100", "page":index}
self._logger.info("{0} Get menu {1} {2} {3}".format(self._name, "{2}://{0}/wp-json/wp/v2/{1}".format(self._wordpress, i, self._protocol_wordpress), index, title))
page = self._request_wordpress.get("{2}://{0}/wp-json/wp/v2/{1}".format(self._wordpress, i, self._protocol_wordpress), auth=self._basic, params=params)
if page.status_code == 200:
result = page.json()
if len(result) > 0:
for j in result:
self._logger.info("{0} info : {1} {2} {3}".format(self._name, j["name"], j["slug"], title))
if j["name"] == title or j["slug"] == title:
self._logger.info("{0} : comparaison ok : {1} {2}".format(self._name, j["id"], i))
idMenu = {"id": j["id"], "type": typeId, "link": j["link"]}
exist = True
else:
break
elif page.status_code == 400:
break
else:
self._logger.error("{0} : {2} didn't get due status code : {1}".format(self._name, page.status_code, i))
self._logger.debug("{0} : {1}".format(self._name, page.content))
except ConnectionError as err:
self._logger.error("{0} : Connection error for get url {1} : {2}".format(self._name, "{2}://{0}/wp-json/wp/v2/{1}".format(self._wordpress, i, self._protocol_wordpress), err))
exit(1)
except Exception as err:
self._logger.error("{0} : Exception error for get url {1} : {2}".format(self._name, "{2}://{0}/wp-json/wp/v2/{1}".format(self._wordpress, i, self._protocol_wordpress), err))
return idMenu
def _menu(self, content):
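# Rebuild the Canalblog menu: top-level entries come from ul#listsmooth,
# "#" entries carry a submenu, and every entry is resolved to a WordPress
# id (category, tag, post or page) before the menu is created.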
soup = BeautifulSoup(content, self._parser)
ul = soup.find("ul", id="listsmooth")
menu = list()
children = list()
for anchor in ul.find_all("li"):
parent = anchor.find("a").get_text().replace(" \xa0", "")
href = anchor.find("a").get("href")
if href == "{0}://{1}/".format(self._protocol_canalblog, self._canalblog):
parent = "home"
itemMenu = {"id":"", "type":"", "title": parent, "link":"", "href":href, "children":list()}
if href == "#":
li = anchor.find("ul").find_all("li")
for child in li:
a = child.find("a")
href = a.get("href")
self._logger.info("{0} Parent {1} : Child {2}".format(self._name, parent, a.get_text()))
children.append({"title": a.get_text(), "parent": parent, "href":href, "link":""})
menu.append(itemMenu)
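# A child that was also captured as a top-level entry is removed from the
# top level, then attached under its parent.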
for i in range(0, len(children)):
self._logger.info("{0} : Child {1} {2}".format(self._name, children[i], i))
for j in range(0, len(menu)):
if j < len(menu):
if menu[j]["title"] == children[i]["title"]:
self._logger.info("{0} : Parent {1} {2}".format(self._name, menu[j], j))
del menu[j]
for j in range(0, len(menu)):
self._logger.info("{0} : Children for : {1}".format(self._name, menu[j]["title"]))
if menu[j]["title"] == children[i]["parent"]:
menu[j]["children"].append({"id":"", "type":"", "title":children[i]["title"], "parent": children[i]["parent"], "link":"", "href":children[i]["href"]})
for i in range(0, len(menu)):
self._logger.info("{0} : Menu {1} {2}".format(self._name, menu[i]["title"], len(menu[i]["children"])))
if menu[i]["title"] != "home":
for j in range(0, len(menu[i]["children"])):
idMenu = self._getId(menu[i]["children"][j]["title"])
if idMenu["id"] == 0:
self._logger.debug("{0} : content children {1}".format(self._name, menu[i]["children"][j]))
idMenu = self._getIdFromReverse(menu[i]["children"][j]["title"], menu[i]["children"][j]["href"])
if idMenu["id"] != 0:
menu[i]["children"][j] = {"id":idMenu["id"], "type": idMenu["type"], "link": idMenu["link"], "title": menu[i]["children"][j]["title"], "parent": menu[i]["children"][j]["parent"]}
idMenu = self._getId(menu[i]["title"])
self._logger.debug("{0} : content parent {1}".format(self._name, menu[i]))
self._logger.debug("{0} : content idMenu {1}".format(self._name, idMenu))
if idMenu["id"] == 0:
idMenu = self._getIdFromReverse(menu[i]["title"], menu[i]["href"])
if idMenu["id"] != 0:
menu[i] = {"id":idMenu["id"], "type": idMenu["type"], "title":menu[i]["title"], "link":idMenu["link"], "children": menu[i]["children"]}
self._createMenu(menu)
def _createItemMenu(self, idMenu, itemMenu, parent):
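# Create or update a single menu item: search menu-items for the title,
# POST to /wp-json/wp/v2/menu-items (or /menu-items/{id} when updating),
# and return the item id so children can reference it as parent.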
idItemMenu = 0
self._logger.info("{0} : Create item menu from API Wordpress : {1}".format(self._name, self._wordpress))
try:
params = {"search": itemMenu["title"], "menus": idMenu}
page = self._request_wordpress.get("{1}://{0}/wp-json/wp/v2/menu-items".format(self._wordpress, self._protocol_wordpress), auth=self._basic, params=params)
if page.status_code == 200:
result = page.json()
for i in result:
if self._replaceCaracter(i["title"]["rendered"]) == itemMenu["title"]:
idItemMenu = int(i["id"])
self._logger.info("{0} : Length of result for item menus : {1}".format(self._name, len(result)))
url = "{1}://{0}/wp-json/wp/v2/menu-items".format(self._wordpress, self._protocol_wordpress)
if idItemMenu != 0:
url = "{1}://{0}/wp-json/wp/v2/menu-items/{2}".format(self._wordpress, self._protocol_wordpress, idItemMenu)
try:
objectt = itemMenu["type"]
if objectt == "tag":
objectt = "post_tag"
data = {"title": itemMenu["title"], "status": "publish", "parent":parent, "menus":idMenu, "url":"#"}
if itemMenu["title"] == "home":
data = {"title": itemMenu["title"], "status": "publish", "parent":parent, "menus":idMenu, "url":"{0}://{1}".format(self._protocol_wordpress, self._wordpress)}
if type(itemMenu["id"]) is str:
if len(itemMenu["id"]) > 0:
data = {"title": itemMenu["title"], "status": "publish", "parent":parent, "url": itemMenu["link"], "menus":idMenu, "object":objectt, "object_id":int(itemMenu["id"])}
elif type(itemMenu["id"]) is int:
data = {"title": itemMenu["title"], "status": "publish", "parent":parent, "url": itemMenu["link"], "menus":idMenu, "object":objectt, "object_id":itemMenu["id"]}
self._logger.debug("{0} : data for create/update : {1}".format(self._name, data))
page = self._request_wordpress.post(url, auth=self._basic, headers=self._headers_json, data=json.dumps(data))
if page.status_code in [201, 200]:
result = page.json()
idItemMenu = int(result["id"])
self._logger.info("{0} : create/update item menu : {1}".format(self._name, itemMenu["title"]))
else:
self._logger.error("{0} : Create menu items for {2} didn't get due status code : {1}".format(self._name, page.status_code, itemMenu["title"]))
self._logger.debug("{0} : {1} {2}".format(self._name, page.content, itemMenu))
except ConnectionError as err:
self._logger.error("{0} : Connection error for create item menu {1} : {2}".format(self._name, "{1}://{0}/wp-json/wp/v2/menu-items".format(self._wordpress, self._protocol_wordpress), err))
exit(1)
except Exception as err:
self._logger.error("{0} : Exception error for create item menu {1} : {2}".format(self._name, "{1}://{0}/wp-json/wp/v2/menu-items".format(self._wordpress, self._protocol_wordpress), err))
else:
self._logger.error("{0} : Get menu items didn't get due status code : {1}".format(self._name, page.status_code))
self._logger.debug("{0} : {1}".format(self._name, page.content))
except ConnectionError as err:
self._logger.error("{0} : Connection error for get item menus {1} : {2}".format(self._name, "{1}://{0}/wp-json/wp/v2/menu-items".format(self._wordpress, self._protocol_wordpress), err))
exit(1)
except Exception as err:
self._logger.error("{0} : Exception error for get item menus {1} : {2}".format(self._name, "{1}://{0}/wp-json/wp/v2/menu-items".format(self._wordpress, self._protocol_wordpress), err))
return idItemMenu
def _createMenu(self, menu):
title = "Menu {0}".format(self._wordpress)
self._logger.info("{0} : Create menu from API Wordpress : {1}".format(self._name, title))
try:
params = {"search": title}
page = self._request_wordpress.get("{1}://{0}/wp-json/wp/v2/menus".format(self._wordpress, self._protocol_wordpress), auth=self._basic, params=params)
if page.status_code == 200:
result = page.json()
self._logger.info("{0} : Get content menus : {1}".format(self._name, len(result)))
idMenu = 0
if len(result) == 0:
self._logger.info("{0} : Create menu : {1}".format(self._name, title))
data = {"name": title}
try:
page = self._request_wordpress.post("{1}://{0}/wp-json/wp/v2/menus".format(self._wordpress, self._protocol_wordpress), auth=self._basic, headers=self._headers_json, data=json.dumps(data))
if page.status_code == 201:
result = page.json()
self._logger.debug("{0} : Get menus : {1}".format(self._name, result))
if len(result) > 0:
idMenu = result["id"]
else:
self._logger.error("{0} : Post menu didn't get due status code : {1}".format(self._name, page.status_code))
self._logger.debug("{0} : {1}".format(self._name, page.content))
except ConnectionError as err:
self._logger.error("{0} : Connection error for create menu {1} : {2}".format(self._name, "{1}://{0}/wp-json/wp/v2/menus".format(self._wordpress, self._protocol_wordpress), err))
exit(1)
except Exception as err:
self._logger.error("{0} : Exception error for get menu {1} : {2}".format(self._name, "{1}://{0}/wp-json/wp/v2/menus".format(self._wordpress, self._protocol_wordpress), err))
else:
self._logger.debug("{0} : Get menus : {1}".format(self._name, result))
for i in result:
self._logger.debug("{0} : List menus : {1}".format(self._name, i))
if i["name"] == title:
idMenu = i["id"]
self._logger.info("{0} : Get ID menus : {1}".format(self._name, idMenu))
self._addItemMenu(menu, idMenu)
else:
self._logger.error("{0} : Get menu didn't get due status code : {1}".format(self._name, page.status_code))
self._logger.debug("{0} : {1}".format(self._name, page.content))
except ConnectionError as err:
self._logger.error("{0} : Connection error for get menu {1} : {2}".format(self._name, "{1}://{0}/wp-json/wp/v2/menus".format(self._wordpress, self._protocol_wordpress), err))
exit(1)
except Exception as err:
self._logger.error("{0} : Exception error for get menu {1} : {2}".format(self._name, "{1}://{0}/wp-json/wp/v2/menus".format(self._wordpress, self._protocol_wordpress), err))
def _addItemMenu(self, menu, idMenu):
self._logger.info("{0} : add item to menu : {1}".format(self._name, idMenu))
parent = 0
for i in menu:
parent = 0
self._logger.debug("{0} : debug create item menu : {1}".format(self._name, i))
parent = self._createItemMenu(idMenu, i, parent)
for j in i["children"]:
self._createItemMenu(idMenu, j, parent)
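A minimal driver for WPMenu might look like the sketch below; the module path, credentials, site URLs and logger setup are assumptions, since the calling script is not part of this diff.
#!/usr/bin/python3
# Hypothetical usage sketch (not part of the diff): module path, credentials,
# site URLs and logger configuration are placeholders.
import logging
from requests.auth import HTTPBasicAuth
from lib.WPMenu import WPMenu

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("import_canalblog")
basic = HTTPBasicAuth("admin", "application-password")
menu = WPMenu(name="Thread-1", basic=basic, canalblog="example.canalblog.com", wordpress="blog.example.org", logger=logger)
menu.fromUrl("https://example.canalblog.com/")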

lib/WPRemove.py Normal file
@@ -0,0 +1,128 @@
#!/usr/bin/python3
from bs4 import BeautifulSoup
from urllib.parse import urlparse
import requests, os, logging, re, json
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
class WPRemove:
# Constructor
def __init__(self, index_name=1, number_thread=1, basic=None, wordpress="", logger=None, ssl_wordpress=True):
self._basic = basic
self._wordpress = wordpress
self._logger = logger
self._headers_json = {'Content-Type': 'application/json', 'Accept':'application/json'}
self._name = "Thread-{0}".format(index_name)
self._index_thread = index_name
self._protocol = "https"
self._number_thread = number_thread
if ssl_wordpress is False:
self._protocol = "http"
self._request = requests.Session()
retries = Retry(connect=10, read=10, redirect=5,
status_forcelist=[429, 500, 502, 503, 504], backoff_factor=2)
self._request.mount('{0}://'.format(self._protocol), HTTPAdapter(max_retries=retries))
# Destructor
def __del__(self):
print("{0} : Import finished for {1}".format(self._name, self._wordpress))
# Private method
def _getCount(self, composant):
count = 0
try:
params = {"per_page":1}
self._logger.info("{0} : Get count {2} to remove for url : {1}".format(self._name, self._wordpress, composant))
r = self._request.get("{2}://{0}/wp-json/wp/v2/{1}".format(self._wordpress, composant, self._protocol), params=params, auth=self._basic, headers=self._headers_json)
if r.status_code == 200:
count = int(r.headers["X-WP-Total"])
else:
self._logger.error("{0} : Error for list to remove {1} due status code {2}".format(self._name, composant, r.status_code))
self._logger.debug("{0} : Content error for {1} : {2}".format(self._name, composant, r.content))
except ConnectionError as err:
self._logger.error("{0} : Connection error for list {1} to remove : {2}".format(self._name, composant, err))
exit(1)
except Exception as err:
self._logger.error("{0} : Exception error for list {1} to remove : {2}".format(self._name, composant, err))
return count
# Public method
def setUrl(self, wordpress):
self._wordpress = wordpress
def cleanPosts(self):
self._removeAll("posts")
def cleanTags(self):
self._removeAll("tags")
def cleanCategories(self):
self._removeAll("categories")
def cleanMedia(self):
self._removeAll("media")
# Private method
def _removeAll(self, composant):
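# Delete one page of items, sized so that each thread works on its own
# page, then recurse until the REST API returns no more content.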
count = self._getCount(composant)
self._logger.debug("{0} : Count for {1} : {2}".format(self._name, composant, count))
if count > 0:
self._logger.debug("{0} : Number thread for {1} : {2}".format(self._name, composant, self._number_thread))
page = count / int(self._number_thread)
self._logger.debug("{0} : Page for {1} : {2}".format(self._name, composant, page))
if page > int(page):
page = int(page) + 1
if page > 100:
page = 100
params = {"per_page":page, "page":self._index_thread}
self._logger.info("{0} : Params for {1} : {2}".format(self._name, composant, params))
try:
self._logger.info("{0} : List {2} to remove for url : {1}".format(self._name, self._wordpress, composant))
r = self._request.get("{2}://{0}/wp-json/wp/v2/{1}".format(self._wordpress, composant, self._protocol), auth=self._basic, params=params, headers=self._headers_json)
if r.status_code == 200:
result = r.json()
if len(result) > 0:
for i in result:
is_delete = True
self._logger.info(i["slug"])
if i["slug"] == "non-classe":
is_delete = False
if is_delete is True:
if composant == "tags" or composant == "categories":
title = i["name"]
else:
title = i["title"]["rendered"]
self._logger.info("{0} : Remove {2} for url {1} : {3}".format(self._name, self._wordpress, composant, title))
params = {"force":1}
try:
r = self._request.delete("{3}://{0}/wp-json/wp/v2/{1}/{2}".format(self._wordpress, composant, i["id"], self._protocol), auth=self._basic, headers=self._headers_json , params=params)
if r.status_code == 200:
self._logger.info("{0} : Post removed for URL {1} {2} : {3}".format(self._name, self._wordpress, composant, title))
else:
self._logger.error("{0} : Connection error for post {1} {2} {3} with status code {4}".format(self._name, self._wordpress, composant, title, r.status_code))
except ConnectionError as err:
self._logger.error("{0} : Connection error for {1} remove : {2}".format(self._name, composant, err))
exit(1)
except Exception as err:
self._logger.error("{0} : Exception error for {1} remove : {2}".format(self._name, composant, err))
self._removeAll(composant)
elif r.status_code == 400:
self._logger.error("{0} : No more content for {1} to remove : {2}".format(self._name, composant, r.status_code))
else:
self._logger.error("{0} : Error for list to remove {1} due status code {2}".format(self._name, composant, r.status_code))
self._logger.debug("{0} : Content error for {1} : {2}".format(self._name, composant, r.content))
except ConnectionError as err:
self._logger.error("{0} : Connection error for list {1} to remove : {2}".format(self._name, composant, err))
exit(1)
except Exception as err:
self._logger.error("{0} : Exception error for list {1} to remove : {2}".format(self._name, composant, err))

lib/__init__.py Normal file

@@ -1,241 +0,0 @@
#!/usr/bin/python3
from bs4 import BeautifulSoup
from urllib.parse import urlparse
import requests, os, argparse, logging
def mkdirPath(path_dir, logger):
if not os.path.exists(path_dir):
makedir = []
pathh = path_dir.split("/")
for i in pathh:
makedir.append(i)
repath = "/".join(makedir)
if not os.path.exists(repath):
logger.debug("Dossier crée : {0}".format(repath))
try:
if len(repath) > 0:
os.mkdir(repath)
except Exception as err:
logger.error("Directory error : {0}".format(err))
logger.debug("Directory error : {0} {1} {2} {3} {4}".format(err, path_dir, repath, pathh, makedir))
exit(1)
def getScriptCss(url, js, css, logger):
try:
page = requests.get(url)
except Exception as err:
logger.error("Connection error : {0}".format(err))
exit(1)
page_url = []
if page.status_code == 200:
soup = BeautifulSoup(page.text, 'html.parser')
if js is True:
script = soup.find_all("script")
for anchor in script:
src = anchor.get("src", "/")
if src != "/":
try:
u = urlparse(url)
o = urlparse(src)
except Exception as err:
logger.error("parsing error : {0}".format(err))
exit(1)
if o.netloc == "":
o = o._replace(netloc=u.netloc)
o = o._replace(scheme=u.scheme)
page_url.append(o.geturl())
if css is True:
link = soup.find_all("link")
for anchor in link:
rel = anchor.get("rel")
if rel[0] == "stylesheet":
href = anchor.get("href", "/")
if href != "/":
try:
u = urlparse(url)
o = urlparse(href)
except Exception as err:
logger.error("parsing error : {0}".format(err))
exit(1)
if o.netloc == "":
o = o._replace(netloc=u.netloc)
o = o._replace(scheme=u.scheme)
page_url.append(o.geturl())
return page_url
def getImg(webpage, logger):
page_img = []
for i in webpage:
try:
page = requests.get(i)
except Exception as err:
logger.error("Connection error : {0}".format(err))
exit(1)
if page.status_code == 200:
soup = BeautifulSoup(page.text, 'html.parser')
img = soup.find_all("img")
logger.info("image from page: {0} : ".format(i))
for anchor in img:
src = anchor.get("src", "/")
if src != "/":
if src not in page_img:
logger.info("image: {0} : ".format(src))
page_img.append(src)
return page_img
def getUrlPage(url, logger):
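# Walk the blog menu (ul#listsmooth), follow the pagingfirstline links to
# enumerate every /archives/ page, and collect each article URL from its
# h2 title link.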
try:
page = requests.get(url)
except Exception as err:
logger.error("Connection error : {0}".format(err))
exit(1)
page_url = []
if page.status_code == 200:
soup = BeautifulSoup(page.text, 'html.parser')
ul = soup.find_all("ul", id="listsmooth")
for anchor in ul[0].find_all("a"):
href = anchor.get('href', '/')
if href != "#":
page_url.append(href)
webpage = []
for i in page_url:
try:
page = requests.get(i)
except Exception as err:
logger.error("Connection error : {0}".format(err))
exit(1)
if page.status_code == 200:
logger.info("page : {0}".format(i))
if i not in webpage:
webpage.append(i)
soup = BeautifulSoup(page.text, 'html.parser')
class_div = pagingfirstline = soup.find_all("div", class_="pagingfirstline")
if len(class_div) > 0:
pagingfirstline = class_div[0].find_all("a")
if len(pagingfirstline) > 1:
lastpage = pagingfirstline[len(pagingfirstline)-1].get("href", "/")
element_lastpage = lastpage.split("/")[len(lastpage.split("/"))-1]
number_page = element_lastpage.split("-")[0].split("p")[1]
number_lastpage = int(number_page) / 10
for j in range(1,int(number_lastpage)):
paging = j * 10
categorie = urlparse(i).path.split("/")
url_paging = "{0}/archives/p{1}-10.html".format(url, paging)
if len(categorie) > 2:
url_paging = "{0}/archives/{1}/p{2}-10.html".format(url, categorie[2], paging)
logger.info(url_paging)
if url_paging not in webpage:
webpage.append(url_paging)
page = requests.get(url_paging)
if page.status_code == 200:
soup = BeautifulSoup(page.text, 'html.parser')
h2 = soup.find_all("h2")
for title in h2:
href = title.find_all("a")[0].get("href", "/")
if href not in webpage:
try:
o = urlparse(href)
o = o._replace(scheme="https").geturl()
except Exception as err:
logger.error("parsing error : {0}".format(err))
exit(1)
webpage.append(o)
return webpage
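# Download each URL in webpage into backup_dir, mirroring the host and path layout on disk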
def downloadPage(webpage, backup_dir, logger):
    for i in range(0, len(webpage)):
        try:
            o = urlparse(webpage[i])
        except Exception as err:
            logger.error("parsing error : {0}".format(err))
            exit(1)
        path_web = o.path.split("/")
        filePageWeb = path_web[len(path_web)-1]
        path_web.pop(len(path_web)-1)
        dir_page_web = "/".join(path_web)
        mkdirPath("{0}/{1}/{2}".format(backup_dir, o.netloc, dir_page_web), logger)
        try:
            r = requests.get(webpage[i])
        except Exception as err:
            logger.error("Connection error : {0}".format(err))
            exit(1)
        if r.status_code == 200:
            fileDownload = "{0}/{1}/index.html".format(backup_dir, o.netloc)
            if len(dir_page_web) > 0 and len(filePageWeb) > 0:
                fileDownload = "{0}/{1}{2}/{3}".format(backup_dir, o.netloc, dir_page_web, filePageWeb)
            logger.info("{0}/{1} : {2}".format(i+1, len(webpage), fileDownload))
            try:
                with open(fileDownload, "wb") as fd:
                    fd.write(r.content)
            except Exception as err:
                logger.error("file error : {0}".format(err))
                exit(1)
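# CLI entry point: configure logging, force the URL scheme to https, then fetch JS, CSS, HTML pages and images unless disabled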
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--url", help="canalblog URL to scrape", required=True)
    parser.add_argument("--dir",
                        default="backup",
                        help="backup directory path")
    parser.add_argument("--debug", help="Verbosity", action="store_true")
    parser.add_argument("--logfile", help="Log file", default="")
    parser.add_argument("--no-css", help="No CSS", dest="css", action="store_true")
    parser.add_argument("--no-js", help="No JS", dest="js", action="store_true")
    parser.add_argument("--no-img", help="No img", dest="img", action="store_true")
    parser.add_argument("--no-html", help="No HTML", dest="html", action="store_true")
    parser.add_argument("--quiet", help="No console output", action="store_true")
    args = parser.parse_args()
    logger = logging.getLogger('web_scrap')
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    if args.quiet is False:
        ch = logging.StreamHandler()
        if args.debug is True:
            logger.setLevel(logging.DEBUG)
            ch.setLevel(logging.DEBUG)
        else:
            logger.setLevel(logging.INFO)
            ch.setLevel(logging.INFO)
        ch.setFormatter(formatter)
        logger.addHandler(ch)
    if len(args.logfile) > 0:
        fileHandler = logging.FileHandler(args.logfile)
        if args.debug is True:
            fileHandler.setLevel(logging.DEBUG)
        else:
            fileHandler.setLevel(logging.INFO)
        fileHandler.setFormatter(formatter)
        logger.addHandler(fileHandler)
    try:
        o = urlparse(args.url)
        o = o._replace(scheme="https")
        url = o.geturl().replace(":///", "://")
    except Exception as err:
        logger.error("parsing error : {0}".format(err))
        exit(1)
    if args.js is False:
        script = getScriptCss(url, True, False, logger)
        downloadPage(script, "{0}/{1}/{2}".format(args.dir, o.path, "dists/js"), logger)
    if args.css is False:
        css = getScriptCss(url, False, True, logger)
        downloadPage(css, "{0}/{1}/{2}".format(args.dir, o.path, "dists/css"), logger)
    if args.html is False or args.img is False:
        webpage = getUrlPage(url, logger)
        if args.html is False:
            downloadPage(webpage, args.dir, logger)
        if args.img is False:
            page_src = getImg(webpage, logger)
            downloadPage(page_src, "{0}/{1}/{2}".format(args.dir, o.path, "img"), logger)
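
For reference, a minimal sketch of driving these helpers from Python rather than the CLI; the blog URL is a placeholder, and importing this file as "lib" assumes the lib/__init__.py layout shown above:

#!/usr/bin/python3
# Hypothetical driver script, not part of the repository.
import logging
from lib import getUrlPage, downloadPage  # assumes lib/__init__.py from this diff is importable

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("web_scrap")

url = "https://example.canalblog.com"  # placeholder URL, not from the source
pages = getUrlPage(url, logger)        # collect listing and article URLs
downloadPage(pages, "backup", logger)  # mirror them under backup/<host>/...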