13 Commits

SHA1 Message Date
dce2c2dfa5 Merge pull request 'fix webpage' (#25) from fix-export into master (Reviewed-on: #25) 2023-09-05 20:10:07 +00:00
eaec1ba9d4 fix webpage 2023-09-05 22:06:14 +02:00
3059f785c2 Merge pull request 'album-plus' (#24) from album-plus into master (Reviewed-on: #24) 2023-09-01 22:27:57 +00:00
279a9f2786 fix parameter author 2023-09-02 00:27:18 +02:00
963f83ae81 fix author 2023-09-02 00:26:50 +02:00
7b154e3a1d add author 2023-08-31 22:50:31 +02:00
e5109204aa get link with album 2023-08-30 23:45:16 +02:00
2279e4b0b6 search title album 50% 2023-08-30 22:39:59 +02:00
2e21040196 add private method get info album 2023-08-29 22:26:15 +02:00
b4d0fe8aa0 Merge pull request 'album' (#23) from album into master (Reviewed-on: #23) 2023-08-25 21:47:47 +00:00
6401692d0d finish menu with album import 2023-08-25 23:46:43 +02:00
1fc9c48d2c fix add menu with album 2023-08-24 22:43:15 +02:00
d9c20cedcb add menu 2023-08-24 00:01:46 +02:00
4 changed files with 93 additions and 21 deletions

@@ -55,7 +55,7 @@ def remove(index, number, args, basic, logger, ssl_wordpress):
 def download(name_thread, max_thread, url, logger, parser, directory, html, img, ssl_canalblog, revert, tmp):
     exportWp = WPExport(name="Thread-{0}".format(int(name_thread) + 1), url=url, logger=logger, parser=parser, directory=directory, ssl_canalblog=ssl_canalblog)
-    if not revert:
+    if revert is False:
         exportWp.getUrlPage(name_thread, max_thread)
     for i in ["article", "page"]:
         for j in ["publications", "principal"]:
@@ -170,7 +170,7 @@ if __name__ == '__main__':
     import_parser.add_argument("--no-create", help="No create post", dest="create", default="store_false", action="store_true")
     import_parser.add_argument("--no-update", help="No update post", dest="update", default="store_false", action="store_true")
     import_parser.add_argument("--no-image", help="No image add or update", dest="image", default="store_false", action="store_true")
-    import_parser.add_argument("--author", help="Define author for page album", default="")
+    import_parser.add_argument("--author", dest="author", help="Define author", default="")
@@ -259,12 +259,11 @@ if __name__ == '__main__':
     basic = HTTPBasicAuth(args.user, password)
     if args.command == "import":
         wordpress = args.wordpress.split(",")
-        importWp = WPimport(basic=basic, wordpress="", logger=logger, parser=args.parser, ssl_wordpress=ssl_wordpress, author=args.author)
+        importWp = WPimport(basic=basic, wordpress="", logger=logger, parser=args.parser, ssl_wordpress=ssl_wordpress, author=args.author, ssl_canalblog=ssl_canalblog)
         if len(args.file) > 0:
             for i in wordpress:
                 importWp.setUrl(i)
                 importWp.fromFile(files=args.file.split(","))
-            exit(0)
             menuWp = WPMenu(name="Thread-1", basic=basic, wordpress=args.wordpress, logger=logger, parser=args.parser, ssl_canalblog=ssl_canalblog, ssl_wordpress=ssl_wordpress)
             menuWp.fromFile("{0}".format(args.file.split(",")[0]))
         if len(args.directory) > 0:

@@ -58,14 +58,26 @@ class WPExport:
     # Download HTML
     def downloadHTML(self, first, second):
-        self._downloadPage(webpage[first][second], self._dir)
+        try:
+            with open("{0}/{1}.json".format(self._tmp, self._name)) as file:
+                webpage = json.loads(file.read())
+            self._downloadPage(webpage[first][second], self._dir)
+        except Exception as ex:
+            self._logger.error("{0} : Read file json from tmp : {1}".format(self._name, ex))
     # Download Image
     def downloadImg(self, first, second):
-        page_src = self._getImg(webpage[first][second])
-        o = urlparse(self._url)
-        self._downloadPage(page_src, "{0}/{1}/{2}".format(self._dir, o.path, "img"))
+        try:
+            with open("{0}/{1}.json".format(self._tmp, self._name)) as file:
+                webpage = json.loads(file.read())
+            page_src = self._getImg(webpage[first][second])
+            o = urlparse(self._url)
+            self._downloadPage(page_src, "{0}/{1}/{2}".format(self._dir, o.path, "img"))
+        except Exception as ex:
+            self._logger.error("{0} : Read file json from tmp : {1}".format(self._name, ex))
     # Get URL
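
Both download methods now load the URL list from the per-thread JSON file before using it. Assuming that file holds a nested dict of URL lists keyed the way the main script's download() loops suggest (the "article"/"publications" keys and the path below are illustrative, not confirmed by the diff), reading it back looks roughly like:

    import json

    # Illustrative path; the real one is built from the tmp directory and the thread name.
    with open("/tmp/import_export_canalblog/Thread-1.json") as file:
        webpage = json.loads(file.read())

    # e.g. first="article", second="publications", mirroring the loops in download()
    urls = webpage["article"]["publications"]
    print(len(urls), "URLs to download")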

@@ -8,7 +8,7 @@ from requests.packages.urllib3.util.retry import Retry
 class WPimport:
     # Constructor
-    def __init__(self, name="Thread-0", basic=None, wordpress="", logger=None, parser="html.parser", ssl_wordpress=True, no_create=False, no_update=False, no_image=False, tmp="/tmp/import_export_canablog", author=""):
+    def __init__(self, name="Thread-0", basic=None, wordpress="", logger=None, parser="html.parser", ssl_wordpress=True, no_create=False, no_update=False, no_image=False, tmp="/tmp/import_export_canablog", author="", ssl_canalblog=True):
         self._name = name
         self._basic = basic
         self._wordpress = wordpress
@@ -20,7 +20,7 @@ class WPimport:
         if ssl_wordpress is False:
             self._protocol = "http"
         self._request = requests.Session()
+        self._ssl_canalblog = ssl_canalblog
         retries = Retry(connect=10, read=10, redirect=5,
                         status_forcelist=[429, 500, 502, 503, 504], backoff_factor=2)
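
The hunk stops at the Retry construction; the adapter that applies it to the session sits outside the visible context. As a reminder of the usual requests/urllib3 pattern (the mount calls below are an assumption, not shown in this diff), such a policy is attached like this:

    import requests
    from requests.adapters import HTTPAdapter
    from requests.packages.urllib3.util.retry import Retry

    session = requests.Session()
    retries = Retry(connect=10, read=10, redirect=5,
                    status_forcelist=[429, 500, 502, 503, 504], backoff_factor=2)
    # Wrap the retry policy in an HTTPAdapter and mount it for both schemes,
    # so every request made through this session retries on the listed statuses.
    adapter = HTTPAdapter(max_retries=retries)
    session.mount("http://", adapter)
    session.mount("https://", adapter)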
@@ -139,6 +139,40 @@ class WPimport:
             self._logger.error("{0} : Exception error for get author : {1}".format(self._name, err))
         return author
+    def _getInfoAlbum(self, link):
+        if self._ssl_canalblog:
+            link = link.replace("http", "https")
+        self._logger.info("{0} : Info album : {1}".format(self._name, link))
+        link_o = urlparse(link)
+        if len(link_o.netloc) > 0:
+            self._logger.info("{0} : get album info from web : {1}".format(self._name, link_o))
+            try:
+                response = self._request.get(link)
+                if response.status_code == 200:
+                    self._logger.info("{0} : get content info from web : {1}".format(self._name, link))
+                    page_img = response.content
+            except ConnectionError as err:
+                self._logger.error("{0} : Connection error for get album info : {1}".format(self._name, err))
+                exit(1)
+            except Exception as err:
+                self._logger.error("{0} : Exception error for get album info : {1}".format(self._name, err))
+                exit(1)
+        else:
+            self._logger.info("{0} : get album info from file : {1}".format(self._name, link_o))
+            if os.path.exists("{0}/..{1}".format(self._directory, link_o)):
+                page_img = open("{0}/..{1}".format(self._directory, link_o), "r")
+        soup = BeautifulSoup(page_img, self._parser)
+        paragraph = soup.find("div", class_="albumbody").find("p")
+        self._logger.info("{0} get paragraph : {1}".format(self._name, paragraph))
+        split_paragraph = str(paragraph).split("<br>")
+        self._logger.info("{0} length paragraph splitted : {1}".format(self._name, len(split_paragraph)))
+        if len(split_paragraph) == 1:
+            split_paragraph = str(paragraph).split("<br/>")
+        self._logger.info("{0} get paragraph splitted : {1}".format(self._name, split_paragraph))
+        author = split_paragraph[1].split(":")[1].replace(" ", "").lower()
+        return author
     def _addOrUpdateAlbum(self, soup):
         self._logger.info("{0} : Add/Update Album".format(self._name))
         albumbody = soup.find("div", class_="albumbody")
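
The tail of _getInfoAlbum pulls the author out of the first paragraph of the album body: it splits the paragraph HTML on <br> (falling back to <br/>), keeps the second chunk, and takes whatever follows the colon. A standalone sketch of that extraction, run against an invented album-page snippet (the real markup comes from the downloaded CanalBlog page):

    from bs4 import BeautifulSoup

    # Invented snippet; only the "albumbody" structure mirrors the pages parsed above.
    html = '<div class="albumbody"><p>12 photos<br/>Auteur : Jean Dupont<br/>Vu 42 fois</p></div>'

    soup = BeautifulSoup(html, "html.parser")
    paragraph = soup.find("div", class_="albumbody").find("p")

    # Same splitting strategy as _getInfoAlbum: try "<br>", fall back to "<br/>",
    # then keep what follows the colon in the second chunk.
    split_paragraph = str(paragraph).split("<br>")
    if len(split_paragraph) == 1:
        split_paragraph = str(paragraph).split("<br/>")
    author = split_paragraph[1].split(":")[1].replace(" ", "").lower()
    print(author)  # -> "jeandupont"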
@@ -195,8 +229,19 @@ class WPimport:
-        data = {"title":albumtitle, "content":content_html, "status":"publish"}
         if len(self._author) > 0:
             author = self._getAuthor(self._author)
-            if author != 0:
-                data = {"title":albumtitle, "content":content_html, "status":"publish", "author":author}
+        else:
+            link_a = albumbody.find_all("a")
+            for i in link_a:
+                if re.search(r"/albums/", i.get("href", "/")):
+                    href_a = i.get("href", "/")
+                    break
+            author = self._getInfoAlbum(href_a)
+            self._logger.info("{0} : author : {1}".format(self._name, author))
+            author = self._getAuthor(author)
+        data = {"title":albumtitle, "content":content_html, "status":"publish"}
+        if author != 0:
+            data = {"title":albumtitle, "content":content_html, "status":"publish", "author":author}
         self._logger.debug("{0} : data for album page : {1}".format(self._name, data))
         for index in range(1,10):
             params = {"search": albumtitle, "per_page":100, "page": index}
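
The loop that starts here pages through the WordPress REST API looking for an existing page with the album's title before deciding whether to create or update it. A minimal sketch of that kind of lookup (host, credentials and title below are placeholders):

    import requests
    from requests.auth import HTTPBasicAuth

    # Placeholder values for illustration only.
    wordpress = "blog.example.org"
    basic = HTTPBasicAuth("user", "application-password")
    albumtitle = "Mon album"

    page_id = 0
    for index in range(1, 10):
        params = {"search": albumtitle, "per_page": 100, "page": index}
        response = requests.get("https://{0}/wp-json/wp/v2/pages".format(wordpress),
                                auth=basic, params=params)
        if response.status_code != 200:
            break  # past the last page of results, or the request failed
        for item in response.json():
            if item["title"]["rendered"] == albumtitle:
                page_id = item["id"]  # an existing page to update rather than create
                break
        if page_id != 0:
            break
    print(page_id)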
@@ -807,6 +852,8 @@ class WPimport:
         title = articletitle[0].text
         author = articleacreator[0].text.lower()
+        if len(self._author) > 0:
+            author = self._author
         body = articlebody[0].find_all("p")
         bodyhtml = "<p>"
         for i in body:

@@ -85,19 +85,28 @@ class WPMenu:
     def _getIdfromTitlePost(self, content):
         idMenu = {"id":0, "type":"", "link":""}
         soup = BeautifulSoup(content, self._parser)
-        articletitle = soup.find("h2", class_="articletitle").get_text()
+        articletitle = soup.find_all("h2", class_="articletitle")
+        if len(articletitle) > 0:
+            articletitle = articletitle[0].get_text()
+            search = "posts"
+            post_type = "post"
         if len(articletitle) == 0:
-            articletitle = soup.find("div", class_="albumbody").find("h2").get_text()
+            articletitle = soup.find_all("div", class_="albumbody")
+            if len(articletitle) > 0:
+                articletitle = articletitle[0].find("h2").get_text()
+                search = "pages"
+                post_type = "page"
         exist = False
         for index in range(1,10):
             if exist is False:
                 params = {"search":articletitle, "per_page":100, "page":index}
                 try:
-                    self._logger.debug("{0} : Get Url for post : {1} {2}".format(self._name, "{1}://{0}/wp-json/wp/v2/posts".format(self._wordpress, self._protocol_wordpress), params))
-                    page = self._request_wordpress.get("{1}://{0}/wp-json/wp/v2/posts".format(self._wordpress, self._protocol_wordpress), auth=self._basic, params=params)
+                    self._logger.debug("{0} : Get Url for {3} : {1} {2}".format(self._name, "{1}://{0}/wp-json/wp/v2/{2}".format(self._wordpress, self._protocol_wordpress, search), params, search))
+                    page = self._request_wordpress.get("{1}://{0}/wp-json/wp/v2/{2}".format(self._wordpress, self._protocol_wordpress, search), auth=self._basic, params=params)
                     if page.status_code == 200:
                         result = page.json()
-                        self._logger.info("{0} : Get content post : {1}".format(self._name, len(result)))
+                        self._logger.info("{0} : Get content {2} : {1}".format(self._name, len(result), search))
                         if len(result) > 0:
                             for i in result:
                                 title_rendered = i["title"]["rendered"]
@@ -105,8 +114,8 @@ class WPMenu:
                                 title_rendered = self._replaceCaracter(title_rendered)
                                 self._logger.debug("{0} : comparaison debug {1} {2}".format(self._name, articletitle, title_rendered))
                                 if articletitle == title_rendered:
-                                    self._logger.debug("{0} : get post id : {1}".format(self._name, i))
-                                    idMenu = {"id":i["id"], "type":"post", "link": i["link"]}
+                                    self._logger.debug("{0} : get {2} id : {1}".format(self._name, i, search))
+                                    idMenu = {"id":i["id"], "type":post_type, "link": i["link"]}
                                     exist = True
                         else:
                             self._logger.debug("{0} : {2} {1}".format(self._name, result, len(result)))
@@ -156,13 +165,18 @@ class WPMenu:
         idMenu = {"id":0, "type":"", "link":""}
         if href != "#":
             title = href[::-1]
+            second_title = title.split("/")[2]
+            second_title = second_title[::-1]
             link = title.split("/")[0]
             link = link[::-1]
             title = title.split("/")[1]
             title = title[::-1]
             self._logger.info("{0} link {1} title {2}".format(self._name, link, title))
             if link == "index.html":
-                idMenu = self._getId(title)
+                if second_title == "albums":
+                    idMenu = self._getIdFromPost(href)
+                else:
+                    idMenu = self._getId(title)
             else:
                 idMenu = self._getIdFromPost(href)
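
The [::-1] slicing above reads the last three path segments of the menu link from right to left; a standalone sketch of the same logic, against an illustrative CanalBlog-style URL (the hostname and album name are made up):

    # Illustrative URL only; the real href comes from the exported menu.
    href = "https://example.canalblog.com/albums/mon-album/index.html"

    reversed_href = href[::-1]
    link = reversed_href.split("/")[0][::-1]          # last segment   -> "index.html"
    title = reversed_href.split("/")[1][::-1]         # second to last -> "mon-album"
    second_title = reversed_href.split("/")[2][::-1]  # third to last  -> "albums"

    # "albums" in the third-to-last segment marks an album page, which is then
    # looked up as a WordPress page instead of a post.
    print(link, title, second_title)  # index.html mon-album albums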