Compare commits
9 Commits
| Author | SHA1 | Date |
|---|---|---|
| | dce2c2dfa5 | |
| | eaec1ba9d4 | |
| | 3059f785c2 | |
| | 279a9f2786 | |
| | 963f83ae81 | |
| | 7b154e3a1d | |
| | e5109204aa | |
| | 2279e4b0b6 | |
| | 2e21040196 | |
```diff
@@ -55,7 +55,7 @@ def remove(index, number, args, basic, logger, ssl_wordpress):
 def download(name_thread, max_thread, url, logger, parser, directory, html, img, ssl_canalblog, revert, tmp):
     exportWp = WPExport(name="Thread-{0}".format(int(name_thread) + 1), url=url, logger=logger, parser=parser, directory=directory, ssl_canalblog=ssl_canalblog)
-    if not revert:
+    if revert is False:
         exportWp.getUrlPage(name_thread, max_thread)
     for i in ["article", "page"]:
         for j in ["publications", "principal"]:
```
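The changed line swaps `not revert` for the stricter `revert is False`. Both behave identically for the boolean an argparse `store_true` flag produces, but they diverge for other falsy values; a small illustration (the sample values are mine, not the repository's):

```python
# `not x` is true for False, None, 0, "" and []; `x is False` matches
# only the boolean False itself. For a store_true flag the two are
# interchangeable; they differ when revert can be None or 0.
for revert in (False, None, 0):
    print(repr(revert), not revert, revert is False)
# False True True
# None  True False
# 0     True False
```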
```diff
@@ -170,7 +170,7 @@ if __name__ == '__main__':
     import_parser.add_argument("--no-create", help="No create post", dest="create", default="store_false", action="store_true")
     import_parser.add_argument("--no-update", help="No update post", dest="update", default="store_false", action="store_true")
     import_parser.add_argument("--no-image", help="No image add or update", dest="image", default="store_false", action="store_true")
-    import_parser.add_argument("--author-album", dest=author, help="Define author for page album", default="")
+    import_parser.add_argument("--author", dest="author", help="Define author", default="")

```
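Beyond renaming `--author-album` to `--author`, the new line fixes a latent bug: `dest=author` passed an undefined bare name where the string `"author"` was intended, which would raise `NameError` as soon as the parser was built. A minimal sketch of the corrected flag in isolation (the parser here is a stand-in, not the script's full CLI):

```python
import argparse

# Stand-in for the import subparser; only the fixed option is reproduced.
import_parser = argparse.ArgumentParser(prog="import")
import_parser.add_argument("--author", dest="author", help="Define author", default="")

args = import_parser.parse_args(["--author", "alice"])
print(args.author)  # "alice"; an empty string when the flag is omitted
```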
```diff
@@ -259,7 +259,7 @@ if __name__ == '__main__':
     basic = HTTPBasicAuth(args.user, password)
     if args.command == "import":
         wordpress = args.wordpress.split(",")
-        importWp = WPimport(basic=basic, wordpress="", logger=logger, parser=args.parser, ssl_wordpress=ssl_wordpress, author=args.author)
+        importWp = WPimport(basic=basic, wordpress="", logger=logger, parser=args.parser, ssl_wordpress=ssl_wordpress, author=args.author, ssl_canalblog=ssl_canalblog)
         if len(args.file) > 0:
             for i in wordpress:
                 importWp.setUrl(i)
```
```diff
@@ -58,14 +58,26 @@ class WPExport:
     # Download HTML

     def downloadHTML(self, first, second):
-        self._downloadPage(webpage[first][second], self._dir)
+        try:
+            with open("{0}/{1}.json".format(self._tmp, self._name)) as file:
+                webpage = json.loads(file.read())
+                self._downloadPage(webpage[first][second], self._dir)
+        except Exception as ex:
+            self._logger.error("{0} : Read file json from tmp : {1}".format(self._name, ex))

     # Download Image

     def downloadImg(self, first, second):
-        page_src = self._getImg(webpage[first][second])
-        o = urlparse(self._url)
-        self._downloadPage(page_src, "{0}/{1}/{2}".format(self._dir, o.path, "img"))
+        try:
+            with open("{0}/{1}.json".format(self._tmp, self._name)) as file:
+                webpage = json.loads(file.read())
+                page_src = self._getImg(webpage[first][second])
+                o = urlparse(self._url)
+                self._downloadPage(page_src, "{0}/{1}/{2}".format(self._dir, o.path, "img"))
+        except Exception as ex:
+            self._logger.error("{0} : Read file json from tmp : {1}".format(self._name, ex))

     # Get URL
```
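Both methods previously read a `webpage` variable that was never defined in their scope; they now reload it from the per-thread JSON file under `self._tmp` and wrap the lookup in `try`/`except`. Judging from the loops in `download()` above (`first` ranges over `article`/`page`, `second` over `publications`/`principal`), the file presumably holds a structure along these lines; the shape and URLs are inferred for illustration, not taken from the repository:

```python
import json

# Hypothetical contents of "{tmp}/{name}.json", e.g. Thread-1.json.
webpage = {
    "article": {"publications": ["https://example.canalblog.com/archives/p1.html"],
                "principal": []},
    "page": {"publications": [],
             "principal": ["https://example.canalblog.com/pages/about.html"]},
}
# downloadHTML("article", "publications") would then pass this list on:
print(json.dumps(webpage["article"]["publications"]))
```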
```diff
@@ -8,7 +8,7 @@ from requests.packages.urllib3.util.retry import Retry

 class WPimport:
     # Constructor
-    def __init__(self, name="Thread-0", basic=None, wordpress="", logger=None, parser="html.parser", ssl_wordpress=True, no_create=False, no_update=False, no_image=False, tmp="/tmp/import_export_canablog", author=""):
+    def __init__(self, name="Thread-0", basic=None, wordpress="", logger=None, parser="html.parser", ssl_wordpress=True, no_create=False, no_update=False, no_image=False, tmp="/tmp/import_export_canablog", author="", ssl_canalblog=True):
         self._name = name
         self._basic = basic
         self._wordpress = wordpress
```
```diff
@@ -20,7 +20,7 @@ class WPimport:
         if ssl_wordpress is False:
             self._protocol = "http"
         self._request = requests.Session()
+        self._ssl_canalblog = ssl_canalblog
         retries = Retry(connect=10, read=10, redirect=5,
                         status_forcelist=[429, 500, 502, 503, 504], backoff_factor=2)

```
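A `Retry` object has no effect until it is mounted on the session via an `HTTPAdapter`; the hunk ends before that step, but the standard wiring looks like this (a sketch of the usual pattern, not necessarily the repository's exact lines):

```python
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry

session = requests.Session()
retries = Retry(connect=10, read=10, redirect=5,
                status_forcelist=[429, 500, 502, 503, 504], backoff_factor=2)
# backoff_factor=2 sleeps roughly 2s, 4s, 8s, ... between attempts.
adapter = HTTPAdapter(max_retries=retries)
session.mount("http://", adapter)
session.mount("https://", adapter)
```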
```diff
@@ -139,6 +139,40 @@ class WPimport:
             self._logger.error("{0} : Exception error for get author : {1}".format(self._name, err))
         return author

+    def _getInfoAlbum(self, link):
+        if self._ssl_canalblog:
+            link = link.replace("http", "https")
+        self._logger.info("{0} : Info album : {1}".format(self._name, link))
+        link_o = urlparse(link)
+        if len(link_o.netloc) > 0:
+            self._logger.info("{0} : get album info from web : {1}".format(self._name, link_o))
+            try:
+                response = self._request.get(link)
+                if response.status_code == 200:
+                    self._logger.info("{0} : get content info from web : {1}".format(self._name, link))
+                    page_img = response.content
+            except ConnectionError as err:
+                self._logger.error("{0} : Connection error for get album info : {1}".format(self._name, err))
+                exit(1)
+            except Exception as err:
+                self._logger.error("{0} : Exception error for get album info : {1}".format(self._name, err))
+                exit(1)
+        else:
+            self._logger.info("{0} : get album info from file : {1}".format(self._name, link_o))
+            if os.path.exists("{0}/..{1}".format(self._directory, link_o)):
+                page_img = open("{0}/..{1}".format(self._directory, link_o), "r")
+        soup = BeautifulSoup(page_img, self._parser)
+        paragraph = soup.find("div", class_="albumbody").find("p")
+        self._logger.info("{0} get paragraph : {1}".format(self._name, paragraph))
+        split_paragraph = str(paragraph).split("<br>")
+        self._logger.info("{0} length paragraph splitted : {1}".format(self._name, len(split_paragraph)))
+        if len(split_paragraph) == 1:
+            split_paragraph = str(paragraph).split("<br/>")
+        self._logger.info("{0} get paragraph splitted : {1}".format(self._name, split_paragraph))
+        author = split_paragraph[1].split(":")[1].replace(" ", "").lower()
+        return author
+
     def _addOrUpdateAlbum(self, soup):
         self._logger.info("{0} : Add/Update Album".format(self._name))
         albumbody = soup.find("div", class_="albumbody")
```
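The new `_getInfoAlbum` extracts the author by splitting the album body's first `<p>` on `<br>` (falling back to `<br/>`) and taking the text after the colon in the second fragment. A self-contained sketch of that parsing step; the markup is invented to match the format the method appears to expect:

```python
from bs4 import BeautifulSoup

# Made-up albumbody markup: the author is assumed to sit on the second
# <br>-separated line, after a colon.
html = '<div class="albumbody"><p>Album photo<br/>Auteur : Alice<br/>12 photos</p></div>'
paragraph = BeautifulSoup(html, "html.parser").find("div", class_="albumbody").find("p")

split_paragraph = str(paragraph).split("<br>")
if len(split_paragraph) == 1:          # html.parser serializes <br> as <br/>
    split_paragraph = str(paragraph).split("<br/>")
author = split_paragraph[1].split(":")[1].replace(" ", "").lower()
print(author)  # alice
```

Note that the method leaves `page_img` unassigned (and so raises `UnboundLocalError` at the `BeautifulSoup` call) when the link is local and the cached file is missing, or when the web request returns a non-200 status; the diff does not guard those paths.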
```diff
@@ -195,8 +229,19 @@ class WPimport:
         data = {"title":albumtitle, "content":content_html, "status":"publish"}
         if len(self._author) > 0:
             author = self._getAuthor(self._author)
-            if author != 0:
-                data = {"title":albumtitle, "content":content_html, "status":"publish", "author":author}
+        else:
+            link_a = albumbody.find_all("a")
+            for i in link_a:
+                if re.search(r"/albums/", i.get("href", "/")):
+                    href_a = i.get("href", "/")
+                    break
+            author = self._getInfoAlbum(href_a)
+            self._logger.info("{0} : author : {1}".format(self._name, author))
+            author = self._getAuthor(author)
+            data = {"title":albumtitle, "content":content_html, "status":"publish"}
+
+        if author != 0:
+            data = {"title":albumtitle, "content":content_html, "status":"publish", "author":author}
         self._logger.debug("{0} : data for album page : {1}".format(self._name, data))
         for index in range(1,10):
             params = {"search": albumtitle, "per_page":100, "page": index}
```
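With this hunk, an explicit `--author` still takes precedence; otherwise the importer follows the album's own `/albums/` link to recover the original author. The link filter keeps the first matching `href` and stops. A standalone sketch of that selection (markup invented; note the diff leaves `href_a` undefined if no link matches, so the sketch pre-seeds it):

```python
import re
from bs4 import BeautifulSoup

html = ('<div class="albumbody">'
        '<a href="/index.html">home</a>'
        '<a href="/albums/vacances/index.html">album</a></div>')
albumbody = BeautifulSoup(html, "html.parser").find("div", class_="albumbody")

href_a = "/"  # fallback; the diff itself has no default if nothing matches
for i in albumbody.find_all("a"):
    if re.search(r"/albums/", i.get("href", "/")):
        href_a = i.get("href", "/")
        break
print(href_a)  # /albums/vacances/index.html
```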
```diff
@@ -806,7 +851,9 @@ class WPimport:
                 self._logger.error("{0} : Exception error for post {1} : {2}".format(self._name, i, err))

         title = articletitle[0].text
         author = articleacreator[0].text.lower()
+        if len(self._author) > 0:
+            author = self._author
         body = articlebody[0].find_all("p")
         bodyhtml = "<p>"
         for i in body:
```