From eae95d5671234e38316d565aa6e0bd7fd1116cb6 Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Thu, 23 Mar 2023 23:28:57 +0100
Subject: [PATCH 01/60] add script insert wip
---
insert_wordpress.py | 50 +++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 50 insertions(+)
create mode 100644 insert_wordpress.py
diff --git a/insert_wordpress.py b/insert_wordpress.py
new file mode 100644
index 0000000..3576885
--- /dev/null
+++ b/insert_wordpress.py
@@ -0,0 +1,50 @@
+#!/usr/bin/python3
+from bs4 import BeautifulSoup
+from urllib.parse import urlparse
+from requests.auth import HTTPBasicAuth
+from getpass import getpass
+import requests, os, argparse, logging
+
+
+if __name__ == '__main__':
+
+ month = {"janvier":"01", "février": "02", "mars": "03", "avril":"04", "mai": "05", "juin": "06", "juillet": "07", "août": "08", "septembre": "09", "octobre": "10", "novembre": "11", "décembre": "12"}
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--user", help="wordpress user", required=True)
+ parser.add_argument("--file", help="HTML file", required=True)
+ args = parser.parse_args()
+ password = getpass()
+ if len(password) == 0:
+ print("No password error !!! ")
+ exit(1)
+ basic = HTTPBasicAuth(args.user, password)
+
+ with open(args.file, 'r') as f:
+ contents = f.read()
+
+ soup = BeautifulSoup(contents, 'html.parser')
+ articletitle = soup.find_all("h2", class_="articletitle")
+ articlebody = soup.find_all("div", class_="articlebody")
+ articledate = soup.find_all("span", class_="articledate")
+ dateheader = soup.find_all("div", class_="dateheader")
+ itemfooter = soup.find_all("div", class_="itemfooter")
+ a = itemfooter[0].find_all("a", {"rel": True})
+ tag = []
+ for i in a:
+ rel = i.get("rel")
+ if rel[0] == 'tag':
+ tag.append(i.text)
+ print(tag)
+ title = articletitle[0].text
+ body = articlebody[0]
+ hour = articledate[0].text
+ time = dateheader[0].text.split(" ")
+ data = {"title":title, "content":body, "status":"publish", "date": "{0}-{1}-{2}T{3}:00".format(time[2],month[time[1]],time[0], hour), "tags": [5]}
+ #print(data)
+ exit(0)
+
+ page = requests.post("http://localhost:8080/wp-json/wp/v2/posts", auth=basic, data=data)
+ print(page.status_code)
+ if page.status_code == 201:
+ print(page.content)
\ No newline at end of file
From 3622e379428d5843cdfe767754ad5ba975cd278c Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Thu, 23 Mar 2023 23:49:42 +0100
Subject: [PATCH 02/60] add tags
---
insert_wordpress.py | 31 ++++++++++++++++++++++++++-----
1 file changed, 26 insertions(+), 5 deletions(-)
diff --git a/insert_wordpress.py b/insert_wordpress.py
index 3576885..cd9b3ed 100644
--- a/insert_wordpress.py
+++ b/insert_wordpress.py
@@ -7,9 +7,8 @@ import requests, os, argparse, logging
if __name__ == '__main__':
-
+ tags = []
month = {"janvier":"01", "février": "02", "mars": "03", "avril":"04", "mai": "05", "juin": "06", "juillet": "07", "août": "08", "septembre": "09", "octobre": "10", "novembre": "11", "décembre": "12"}
-
parser = argparse.ArgumentParser()
parser.add_argument("--user", help="wordpress user", required=True)
parser.add_argument("--file", help="HTML file", required=True)
@@ -20,6 +19,13 @@ if __name__ == '__main__':
exit(1)
basic = HTTPBasicAuth(args.user, password)
+ page = requests.get("http://localhost:8080/wp-json/wp/v2/tags")
+
+ if page.status_code == 200:
+ tags = page.json()
+ print(tags)
+
+
with open(args.file, 'r') as f:
contents = f.read()
@@ -35,13 +41,28 @@ if __name__ == '__main__':
rel = i.get("rel")
if rel[0] == 'tag':
tag.append(i.text)
- print(tag)
+ listtag = []
+ for i in tag:
+ tag_exist = False
+ for j in tags:
+ if j["name"] == i:
+ tag_exist = True
+ listtag.append(j["id"])
+ if tag_exist is False:
+ data = {"name": i}
+ page = requests.post("http://localhost:8080/wp-json/wp/v2/tags", auth=basic, data=data)
+ if page.status_code == 201:
+ result = page.json()
+ listtag.append(result["id"])
+
+
+
title = articletitle[0].text
body = articlebody[0]
hour = articledate[0].text
time = dateheader[0].text.split(" ")
- data = {"title":title, "content":body, "status":"publish", "date": "{0}-{1}-{2}T{3}:00".format(time[2],month[time[1]],time[0], hour), "tags": [5]}
- #print(data)
+ data = {"title":title, "content":body, "status":"publish", "date": "{0}-{1}-{2}T{3}:00".format(time[2],month[time[1]],time[0], hour), "tags": listtag}
+ print(data)
exit(0)
page = requests.post("http://localhost:8080/wp-json/wp/v2/posts", auth=basic, data=data)
From 0c41dc3e651bb91c6b229ce83194a6421e79f703 Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Mon, 27 Mar 2023 23:51:51 +0200
Subject: [PATCH 03/60] distinct tags and categories
---
insert_wordpress.py | 58 ++++++++++++++++++++++++++-------------------
1 file changed, 33 insertions(+), 25 deletions(-)
diff --git a/insert_wordpress.py b/insert_wordpress.py
index cd9b3ed..86fab01 100644
--- a/insert_wordpress.py
+++ b/insert_wordpress.py
@@ -3,7 +3,7 @@ from bs4 import BeautifulSoup
from urllib.parse import urlparse
from requests.auth import HTTPBasicAuth
from getpass import getpass
-import requests, os, argparse, logging
+import requests, os, argparse, logging, re
if __name__ == '__main__':
@@ -19,13 +19,19 @@ if __name__ == '__main__':
exit(1)
basic = HTTPBasicAuth(args.user, password)
- page = requests.get("http://localhost:8080/wp-json/wp/v2/tags")
-
- if page.status_code == 200:
- tags = page.json()
- print(tags)
+ liste = ["categories", "tags"]
+ elements = {}
+ element = {}
+ listelement = {}
+ for i in liste:
+ page = requests.get("http://localhost:8080/wp-json/wp/v2/{0}".format(i))
+ if page.status_code == 200:
+ elements[i] = page.json()
+ element[i] = []
+ listelement[i] = []
+
with open(args.file, 'r') as f:
contents = f.read()
@@ -36,34 +42,36 @@ if __name__ == '__main__':
dateheader = soup.find_all("div", class_="dateheader")
itemfooter = soup.find_all("div", class_="itemfooter")
a = itemfooter[0].find_all("a", {"rel": True})
- tag = []
for i in a:
rel = i.get("rel")
if rel[0] == 'tag':
- tag.append(i.text)
- listtag = []
- for i in tag:
- tag_exist = False
- for j in tags:
- if j["name"] == i:
- tag_exist = True
- listtag.append(j["id"])
- if tag_exist is False:
- data = {"name": i}
- page = requests.post("http://localhost:8080/wp-json/wp/v2/tags", auth=basic, data=data)
- if page.status_code == 201:
- result = page.json()
- listtag.append(result["id"])
+ href = i.get("href")
+ if re.search(r'/tag/', href):
+ element["tags"].append(i.text)
+ if re.search(r'/archives/', href):
+ element["categories"].append(i.text)
+ for i in liste:
+ for j in element[i]:
+ element_exist = False
+ for k in elements[i]:
+ if k["name"] == j:
+ element_exist = True
+ array = listelement[i].append(k["id"])
+ if element_exist is False:
+ data = {"name": j}
+ page = requests.post("http://localhost:8080/wp-json/wp/v2/{0}".format(i), auth=basic, data=data)
+ if page.status_code == 201:
+ result = page.json()
+ listelement[i].append(result["id"])
-
+
+
title = articletitle[0].text
body = articlebody[0]
hour = articledate[0].text
time = dateheader[0].text.split(" ")
- data = {"title":title, "content":body, "status":"publish", "date": "{0}-{1}-{2}T{3}:00".format(time[2],month[time[1]],time[0], hour), "tags": listtag}
- print(data)
- exit(0)
+ data = {"title":title, "content":body, "status":"publish", "date": "{0}-{1}-{2}T{3}:00".format(time[2],month[time[1]],time[0], hour), "tags": listelement["tags"], "categories": listelement["categories"]}
page = requests.post("http://localhost:8080/wp-json/wp/v2/posts", auth=basic, data=data)
print(page.status_code)
From 491f15ae3c0d12df0945a42458fc10a21c0c5bb0 Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Tue, 28 Mar 2023 11:31:25 +0200
Subject: [PATCH 04/60] premier essai d'insertion d'article reussi
---
insert_wordpress.py | 27 +++++++++++++++++++++------
1 file changed, 21 insertions(+), 6 deletions(-)
diff --git a/insert_wordpress.py b/insert_wordpress.py
index 86fab01..595a3d6 100644
--- a/insert_wordpress.py
+++ b/insert_wordpress.py
@@ -68,12 +68,27 @@ if __name__ == '__main__':
title = articletitle[0].text
- body = articlebody[0]
+ body = articlebody[0].find_all("p")
+ bodyhtml = ""
+ for i in body:
+ if len(i.text) == 1:
+            bodyhtml = bodyhtml + "<br />"
+        else:
+            bodyhtml = bodyhtml + str(i).replace("<p>", "").replace("</p>", "").replace("<br/>", "<br />") + "<br />"
+    bodyhtml = bodyhtml + "<br />"
+ print(bodyhtml)
hour = articledate[0].text
time = dateheader[0].text.split(" ")
- data = {"title":title, "content":body, "status":"publish", "date": "{0}-{1}-{2}T{3}:00".format(time[2],month[time[1]],time[0], hour), "tags": listelement["tags"], "categories": listelement["categories"]}
+ data = {"title":title, "content":bodyhtml, "status":"publish", "date": "{0}-{1}-{2}T{3}:00".format(time[2],month[time[1]],time[0], hour), "tags": listelement["tags"], "categories": listelement["categories"]}
+ params = {"search":title}
+ page = requests.get("http://localhost:8080/wp-json/wp/v2/posts", auth=basic, params=params)
+ page_exist = True
+ if page.status_code:
+ result= page.json()
+ if len(result) == 0:
+ page_exist = False
- page = requests.post("http://localhost:8080/wp-json/wp/v2/posts", auth=basic, data=data)
- print(page.status_code)
- if page.status_code == 201:
- print(page.content)
\ No newline at end of file
+ if page_exist == False:
+ page = requests.post("http://localhost:8080/wp-json/wp/v2/posts", auth=basic, data=data)
+ if page.status_code == 201:
+ print(page.content)
\ No newline at end of file
From 605bd06e51f2efd33d90fa114310bda2ccbba290 Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Tue, 28 Mar 2023 12:07:11 +0200
Subject: [PATCH 05/60] fix space
---
insert_wordpress.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/insert_wordpress.py b/insert_wordpress.py
index 595a3d6..b2c2787 100644
--- a/insert_wordpress.py
+++ b/insert_wordpress.py
@@ -84,7 +84,7 @@ if __name__ == '__main__':
page = requests.get("http://localhost:8080/wp-json/wp/v2/posts", auth=basic, params=params)
page_exist = True
if page.status_code:
- result= page.json()
+ result = page.json()
if len(result) == 0:
page_exist = False
From 82ce3d1a2b9b099882ad86e856aaa79141b85e8b Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Tue, 28 Mar 2023 15:28:34 +0200
Subject: [PATCH 06/60] add author for article
---
insert_wordpress.py | 14 +++++++++++++-
1 file changed, 13 insertions(+), 1 deletion(-)
diff --git a/insert_wordpress.py b/insert_wordpress.py
index b2c2787..d59a35a 100644
--- a/insert_wordpress.py
+++ b/insert_wordpress.py
@@ -39,6 +39,7 @@ if __name__ == '__main__':
articletitle = soup.find_all("h2", class_="articletitle")
articlebody = soup.find_all("div", class_="articlebody")
articledate = soup.find_all("span", class_="articledate")
+ articleacreator = soup.find_all("span", class_="articlecreator")
dateheader = soup.find_all("div", class_="dateheader")
itemfooter = soup.find_all("div", class_="itemfooter")
a = itemfooter[0].find_all("a", {"rel": True})
@@ -68,6 +69,15 @@ if __name__ == '__main__':
title = articletitle[0].text
+ author = articleacreator[0].text.lower()
+ author_exist = False
+ params = {"search":author}
+ page = requests.get("http://localhost:8080/wp-json/wp/v2/users", auth=basic, params=params)
+ if page.status_code == 200:
+ result = page.json()
+ print(result)
+ author_id = result[0]["id"]
+ author_exist = True
body = articlebody[0].find_all("p")
bodyhtml = ""
for i in body:
@@ -76,10 +86,12 @@ if __name__ == '__main__':
else:
bodyhtml = bodyhtml + str(i).replace("<p>", "").replace("</p>", "").replace("<br/>", "<br />") + "<br />"
bodyhtml = bodyhtml + ""
- print(bodyhtml)
hour = articledate[0].text
time = dateheader[0].text.split(" ")
data = {"title":title, "content":bodyhtml, "status":"publish", "date": "{0}-{1}-{2}T{3}:00".format(time[2],month[time[1]],time[0], hour), "tags": listelement["tags"], "categories": listelement["categories"]}
+ if author_exist == True:
+ data["author"] = author_id
+
params = {"search":title}
page = requests.get("http://localhost:8080/wp-json/wp/v2/posts", auth=basic, params=params)
page_exist = True
From e3b9e92c23d69411b9aa2108761517393cde67e9 Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Tue, 28 Mar 2023 15:37:48 +0200
Subject: [PATCH 07/60] fix add author
---
insert_wordpress.py | 19 ++++++++-----------
1 file changed, 8 insertions(+), 11 deletions(-)
diff --git a/insert_wordpress.py b/insert_wordpress.py
index d59a35a..5faee26 100644
--- a/insert_wordpress.py
+++ b/insert_wordpress.py
@@ -69,15 +69,7 @@ if __name__ == '__main__':
title = articletitle[0].text
- author = articleacreator[0].text.lower()
- author_exist = False
- params = {"search":author}
- page = requests.get("http://localhost:8080/wp-json/wp/v2/users", auth=basic, params=params)
- if page.status_code == 200:
- result = page.json()
- print(result)
- author_id = result[0]["id"]
- author_exist = True
+ author = articleacreator[0].text.lower()
body = articlebody[0].find_all("p")
bodyhtml = ""
for i in body:
@@ -88,9 +80,14 @@ if __name__ == '__main__':
bodyhtml = bodyhtml + "
"
hour = articledate[0].text
time = dateheader[0].text.split(" ")
+
data = {"title":title, "content":bodyhtml, "status":"publish", "date": "{0}-{1}-{2}T{3}:00".format(time[2],month[time[1]],time[0], hour), "tags": listelement["tags"], "categories": listelement["categories"]}
- if author_exist == True:
- data["author"] = author_id
+ params = {"search":author}
+ page = requests.get("http://localhost:8080/wp-json/wp/v2/users", auth=basic, params=params)
+ if page.status_code == 200:
+ result = page.json()
+ data["author"] = result[0]["id"]
+
params = {"search":title}
page = requests.get("http://localhost:8080/wp-json/wp/v2/posts", auth=basic, params=params)
From dc0fd0c78137d466bbf67eda96ad01c5ca389ac9 Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Tue, 28 Mar 2023 16:40:15 +0200
Subject: [PATCH 08/60] insert comment 75%
---
insert_wordpress.py | 25 +++++++++++++++++++++++--
1 file changed, 23 insertions(+), 2 deletions(-)
diff --git a/insert_wordpress.py b/insert_wordpress.py
index 5faee26..044292d 100644
--- a/insert_wordpress.py
+++ b/insert_wordpress.py
@@ -42,6 +42,19 @@ if __name__ == '__main__':
articleacreator = soup.find_all("span", class_="articlecreator")
dateheader = soup.find_all("div", class_="dateheader")
itemfooter = soup.find_all("div", class_="itemfooter")
+ comment = soup.find_all("div", class_="comment_item")
+ comment_post = []
+ for i in comment:
+ comment_item = i.text.split("\n")
+ footer = i.find_all("div", class_="itemfooter")
+ comment_author = footer[0].text.split(",")[0].replace("Posté par ", "")
+ comment_date = footer[0].find_all("abbr")[0].get("title")
+ comment_content = ""
+ for j in range(0, len(comment_item)-2):
+ if len(comment_item[j]) > 0:
+                comment_content = comment_content + comment_item[j] + "<br />"
+            comment_content = comment_content + "<br />"
+ comment_post.append({"author": comment_author, "date": comment_date, "content": comment_content})
a = itemfooter[0].find_all("a", {"rel": True})
for i in a:
rel = i.get("rel")
@@ -80,7 +93,7 @@ if __name__ == '__main__':
bodyhtml = bodyhtml + ""
hour = articledate[0].text
time = dateheader[0].text.split(" ")
-
+
data = {"title":title, "content":bodyhtml, "status":"publish", "date": "{0}-{1}-{2}T{3}:00".format(time[2],month[time[1]],time[0], hour), "tags": listelement["tags"], "categories": listelement["categories"]}
params = {"search":author}
page = requests.get("http://localhost:8080/wp-json/wp/v2/users", auth=basic, params=params)
@@ -100,4 +113,12 @@ if __name__ == '__main__':
if page_exist == False:
page = requests.post("http://localhost:8080/wp-json/wp/v2/posts", auth=basic, data=data)
if page.status_code == 201:
- print(page.content)
\ No newline at end of file
+ result = page.json()
+ print("Article ajoute : {0}".format(result["title"]["raw"]))
+ print(comment_post)
+ for i in comment_post:
+ data = {"post": result["id"], "content": i["content"], "date": i["date"]}
+ page = requests.post("http://localhost:8080/wp-json/wp/v2/comments", auth=basic, data=data)
+ print(page.status_code)
+ if page.status_code == 201:
+ print("Commentaire ajoute pour {0}".format(result["title"]["raw"]))
\ No newline at end of file
From d96d38e50879c870ff3246be3c7a60cd0fbcac44 Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Tue, 28 Mar 2023 16:43:56 +0200
Subject: [PATCH 09/60] add author name
---
insert_wordpress.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/insert_wordpress.py b/insert_wordpress.py
index 044292d..be12c65 100644
--- a/insert_wordpress.py
+++ b/insert_wordpress.py
@@ -117,7 +117,7 @@ if __name__ == '__main__':
print("Article ajoute : {0}".format(result["title"]["raw"]))
print(comment_post)
for i in comment_post:
- data = {"post": result["id"], "content": i["content"], "date": i["date"]}
+ data = {"post": result["id"], "content": i["content"], "date": i["date"], "author_name": i["author"]}
page = requests.post("http://localhost:8080/wp-json/wp/v2/comments", auth=basic, data=data)
print(page.status_code)
if page.status_code == 201:
From 19229bc65bd0d332ce447a623c5d35ceb040f7ae Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Tue, 28 Mar 2023 22:29:55 +0200
Subject: [PATCH 10/60] add .gitignore + add function
---
.gitignore | 2 ++
insert_wordpress.py | 33 ++++++++++++++++++++-------------
2 files changed, 22 insertions(+), 13 deletions(-)
create mode 100644 .gitignore
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..d9171fe
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
+backup/
+web_scrap.log
diff --git a/insert_wordpress.py b/insert_wordpress.py
index be12c65..babec3a 100644
--- a/insert_wordpress.py
+++ b/insert_wordpress.py
@@ -5,19 +5,9 @@ from requests.auth import HTTPBasicAuth
from getpass import getpass
import requests, os, argparse, logging, re
-
-if __name__ == '__main__':
+def insertWordpress(file, basic):
tags = []
month = {"janvier":"01", "février": "02", "mars": "03", "avril":"04", "mai": "05", "juin": "06", "juillet": "07", "août": "08", "septembre": "09", "octobre": "10", "novembre": "11", "décembre": "12"}
- parser = argparse.ArgumentParser()
- parser.add_argument("--user", help="wordpress user", required=True)
- parser.add_argument("--file", help="HTML file", required=True)
- args = parser.parse_args()
- password = getpass()
- if len(password) == 0:
- print("No password error !!! ")
- exit(1)
- basic = HTTPBasicAuth(args.user, password)
liste = ["categories", "tags"]
elements = {}
@@ -32,7 +22,7 @@ if __name__ == '__main__':
listelement[i] = []
- with open(args.file, 'r') as f:
+ with open(file, 'r') as f:
contents = f.read()
soup = BeautifulSoup(contents, 'html.parser')
@@ -109,6 +99,8 @@ if __name__ == '__main__':
result = page.json()
if len(result) == 0:
page_exist = False
+ else:
+ print("La page {0} existe deja".format(title))
if page_exist == False:
page = requests.post("http://localhost:8080/wp-json/wp/v2/posts", auth=basic, data=data)
@@ -121,4 +113,19 @@ if __name__ == '__main__':
page = requests.post("http://localhost:8080/wp-json/wp/v2/comments", auth=basic, data=data)
print(page.status_code)
if page.status_code == 201:
- print("Commentaire ajoute pour {0}".format(result["title"]["raw"]))
\ No newline at end of file
+ print("Commentaire ajoute pour {0}".format(result["title"]["raw"]))
+
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--user", help="wordpress user", required=True)
+ parser.add_argument("--file", help="HTML file", required=True)
+ args = parser.parse_args()
+ password = getpass()
+ if len(password) == 0:
+ print("No password error !!! ")
+ exit(1)
+
+ basic = HTTPBasicAuth(args.user, password)
+ insertWordpress(args.file, basic)
\ No newline at end of file
From f250637912f8432ca9a578d66300efad32a153e3 Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Wed, 29 Mar 2023 22:31:35 +0200
Subject: [PATCH 11/60] add class + wordpress
---
insert_wordpress.py | 101 +++++++++++++++++++++++---------------------
1 file changed, 54 insertions(+), 47 deletions(-)
diff --git a/insert_wordpress.py b/insert_wordpress.py
index babec3a..c0c4d3d 100644
--- a/insert_wordpress.py
+++ b/insert_wordpress.py
@@ -5,27 +5,33 @@ from requests.auth import HTTPBasicAuth
from getpass import getpass
import requests, os, argparse, logging, re
-def insertWordpress(file, basic):
- tags = []
- month = {"janvier":"01", "février": "02", "mars": "03", "avril":"04", "mai": "05", "juin": "06", "juillet": "07", "août": "08", "septembre": "09", "octobre": "10", "novembre": "11", "décembre": "12"}
-
- liste = ["categories", "tags"]
- elements = {}
- element = {}
- listelement = {}
-
- for i in liste:
- page = requests.get("http://localhost:8080/wp-json/wp/v2/{0}".format(i))
- if page.status_code == 200:
- elements[i] = page.json()
- element[i] = []
- listelement[i] = []
-
+class WPimport:
- with open(file, 'r') as f:
- contents = f.read()
+ def __init__(self, basic, wordpress):
+ self.basic = basic
+ self.wordpress = wordpress
- soup = BeautifulSoup(contents, 'html.parser')
+ def fromFile(self, file):
+ with open(file, 'r') as f:
+ contents = f.read()
+ self.insertWordpress(contents)
+
+ def insertWordpress(self, content):
+ tags = []
+ month = {"janvier":"01", "février": "02", "mars": "03", "avril":"04", "mai": "05", "juin": "06", "juillet": "07", "août": "08", "septembre": "09", "octobre": "10", "novembre": "11", "décembre": "12"}
+ liste = ["categories", "tags"]
+ elements = {}
+ element = {}
+ listelement = {}
+
+ for i in liste:
+ page = requests.get("http://{0}/wp-json/wp/v2/{1}".format(self.wordpress,i))
+ if page.status_code == 200:
+ elements[i] = page.json()
+ element[i] = []
+ listelement[i] = []
+
+ soup = BeautifulSoup(content, 'html.parser')
articletitle = soup.find_all("h2", class_="articletitle")
articlebody = soup.find_all("div", class_="articlebody")
articledate = soup.find_all("span", class_="articledate")
@@ -63,14 +69,11 @@ def insertWordpress(file, basic):
array = listelement[i].append(k["id"])
if element_exist is False:
data = {"name": j}
- page = requests.post("http://localhost:8080/wp-json/wp/v2/{0}".format(i), auth=basic, data=data)
+ page = requests.post("http://{0}/wp-json/wp/v2/{1}".format(self.wordpress, i), auth=self.basic, data=data)
if page.status_code == 201:
result = page.json()
listelement[i].append(result["id"])
-
-
-
title = articletitle[0].text
author = articleacreator[0].text.lower()
body = articlebody[0].find_all("p")
@@ -86,34 +89,36 @@ def insertWordpress(file, basic):
data = {"title":title, "content":bodyhtml, "status":"publish", "date": "{0}-{1}-{2}T{3}:00".format(time[2],month[time[1]],time[0], hour), "tags": listelement["tags"], "categories": listelement["categories"]}
params = {"search":author}
- page = requests.get("http://localhost:8080/wp-json/wp/v2/users", auth=basic, params=params)
+ page = requests.get("http://{0}/wp-json/wp/v2/users".format(self.wordpress), auth=self.basic, params=params)
if page.status_code == 200:
result = page.json()
data["author"] = result[0]["id"]
-
- params = {"search":title}
- page = requests.get("http://localhost:8080/wp-json/wp/v2/posts", auth=basic, params=params)
- page_exist = True
- if page.status_code:
- result = page.json()
- if len(result) == 0:
- page_exist = False
- else:
- print("La page {0} existe deja".format(title))
-
- if page_exist == False:
- page = requests.post("http://localhost:8080/wp-json/wp/v2/posts", auth=basic, data=data)
- if page.status_code == 201:
+ params = {"search":title}
+ page = requests.get("http://{0}/wp-json/wp/v2/posts".format(self.wordpress), auth=self.basic, params=params)
+ page_exist = True
+ if page.status_code:
result = page.json()
- print("Article ajoute : {0}".format(result["title"]["raw"]))
- print(comment_post)
- for i in comment_post:
- data = {"post": result["id"], "content": i["content"], "date": i["date"], "author_name": i["author"]}
- page = requests.post("http://localhost:8080/wp-json/wp/v2/comments", auth=basic, data=data)
- print(page.status_code)
- if page.status_code == 201:
- print("Commentaire ajoute pour {0}".format(result["title"]["raw"]))
+ if len(result) == 0:
+ page_exist = False
+ else:
+ print("La page {0} existe deja".format(title))
+
+ if page_exist == False:
+ page = requests.post("http://{0}/wp-json/wp/v2/posts".format(self.wordpress), auth=self.basic, data=data)
+ if page.status_code == 201:
+ result = page.json()
+ print("Article ajoute : {0}".format(result["title"]["raw"]))
+ print(comment_post)
+ for i in comment_post:
+ data = {"post": result["id"], "content": i["content"], "date": i["date"], "author_name": i["author"]}
+ page = requests.post("http://{0}/wp-json/wp/v2/comments".format(self.wordpress), auth=self.basic, data=data)
+ print(page.status_code)
+ if page.status_code == 201:
+ print("Commentaire ajoute pour {0}".format(result["title"]["raw"]))
+
+
+
@@ -121,6 +126,7 @@ if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--user", help="wordpress user", required=True)
parser.add_argument("--file", help="HTML file", required=True)
+ parser.add_argument("--wordpress", help="URL Wordpress", required=True)
args = parser.parse_args()
password = getpass()
if len(password) == 0:
@@ -128,4 +134,5 @@ if __name__ == '__main__':
exit(1)
basic = HTTPBasicAuth(args.user, password)
- insertWordpress(args.file, basic)
\ No newline at end of file
+ importWp = WPimport(basic)
+ importWp.fromFile(args.file)
\ No newline at end of file
From e1b0c0cba80ff0a2ec2fd1253d07a4ca99ee5c2a Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Wed, 29 Mar 2023 22:59:15 +0200
Subject: [PATCH 12/60] img wip
---
insert_wordpress.py | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/insert_wordpress.py b/insert_wordpress.py
index c0c4d3d..216b483 100644
--- a/insert_wordpress.py
+++ b/insert_wordpress.py
@@ -39,6 +39,9 @@ class WPimport:
dateheader = soup.find_all("div", class_="dateheader")
itemfooter = soup.find_all("div", class_="itemfooter")
comment = soup.find_all("div", class_="comment_item")
+ img = articlebody[0].find_all("img")
+ print(img)
+ exit(0)
comment_post = []
for i in comment:
comment_item = i.text.split("\n")
@@ -134,5 +137,5 @@ if __name__ == '__main__':
exit(1)
basic = HTTPBasicAuth(args.user, password)
- importWp = WPimport(basic)
+ importWp = WPimport(basic, args.wordpress)
importWp.fromFile(args.file)
\ No newline at end of file
From 301f1e2d4b698dfa1e7d2fe492e5cc399c005c5f Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Thu, 30 Mar 2023 23:29:29 +0200
Subject: [PATCH 13/60] add img successful in media
---
insert_wordpress.py | 29 +++++++++++++++++++++++++++--
1 file changed, 27 insertions(+), 2 deletions(-)
diff --git a/insert_wordpress.py b/insert_wordpress.py
index 216b483..389cb7f 100644
--- a/insert_wordpress.py
+++ b/insert_wordpress.py
@@ -39,8 +39,33 @@ class WPimport:
dateheader = soup.find_all("div", class_="dateheader")
itemfooter = soup.find_all("div", class_="itemfooter")
comment = soup.find_all("div", class_="comment_item")
- img = articlebody[0].find_all("img")
- print(img)
+ img_a = articlebody[0].find_all("a", {"target": "_blank"})
+ list_img = []
+ for i in img_a:
+ new_img = {}
+ img = i.find_all("img")
+ if len(img) > 0:
+ href_a = i.get("href")
+ href_img = img[0].get("src")
+ page_img = requests.get(href_img)
+ if page_img.status_code == 404:
+ href_img = href_a
+ page = requests.get(href_img)
+ if page.status_code == 200:
+ split_fileimg = href_img.split("/")
+ img_name = split_fileimg[len(split_fileimg)-1]
+ data = page.content
+ img_type = "image/png"
+ if img_name.split(".")[1]:
+ img_type = "image/jpg"
+ headers={ 'Content-Type': img_type,'Content-Disposition' : 'attachment; filename={0}'.format(img_name)}
+ r = requests.post("http://{0}/wp-json/wp/v2/media".format(self.wordpress), auth=self.basic, headers=headers, data=data)
+ if r.status_code == 201:
+ new_img["old_src"]=href_img
+ new_img["id"]=r.json()["id"]
+ new_img["new_src"]=r.json()["guid"]["rendered"]
+ list_img.append(new_img)
+ print(list_img)
exit(0)
comment_post = []
for i in comment:
From c92f24e6af681dd3737d4e63c6b8a8754600b356 Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Thu, 30 Mar 2023 23:50:25 +0200
Subject: [PATCH 14/60] check image exist in media
---
insert_wordpress.py | 44 +++++++++++++++++++++++++++++++++-----------
1 file changed, 33 insertions(+), 11 deletions(-)
diff --git a/insert_wordpress.py b/insert_wordpress.py
index 389cb7f..780844d 100644
--- a/insert_wordpress.py
+++ b/insert_wordpress.py
@@ -48,25 +48,37 @@ class WPimport:
href_a = i.get("href")
href_img = img[0].get("src")
page_img = requests.get(href_img)
+ img_break = False
if page_img.status_code == 404:
href_img = href_a
+ img_break = True
page = requests.get(href_img)
if page.status_code == 200:
+
split_fileimg = href_img.split("/")
img_name = split_fileimg[len(split_fileimg)-1]
- data = page.content
- img_type = "image/png"
- if img_name.split(".")[1]:
- img_type = "image/jpg"
- headers={ 'Content-Type': img_type,'Content-Disposition' : 'attachment; filename={0}'.format(img_name)}
- r = requests.post("http://{0}/wp-json/wp/v2/media".format(self.wordpress), auth=self.basic, headers=headers, data=data)
- if r.status_code == 201:
+ params = { "search": img_name}
+ r = requests.get("http://{0}/wp-json/wp/v2/media".format(self.wordpress), auth=self.basic, params=params)
+ if r.status_code == 200:
+ res = r.json()
+ if len(res) == 0:
+ data = page.content
+ img_type = "image/png"
+ if img_name.split(".")[1]:
+ img_type = "image/jpg"
+ headers={ 'Content-Type': img_type,'Content-Disposition' : 'attachment; filename={0}'.format(img_name)}
+ r = requests.post("http://{0}/wp-json/wp/v2/media".format(self.wordpress), auth=self.basic, headers=headers, data=data)
+ if r.status_code == 201:
+ res = r.json()
+
new_img["old_src"]=href_img
- new_img["id"]=r.json()["id"]
- new_img["new_src"]=r.json()["guid"]["rendered"]
+ new_img["old_href"]=href_a
+ new_img["id"]=res["id"]
+ new_img["new_src"]=res["guid"]["rendered"]
+ new_img["break"]=img_break
list_img.append(new_img)
- print(list_img)
- exit(0)
+
+
comment_post = []
for i in comment:
comment_item = i.text.split("\n")
@@ -112,6 +124,16 @@ class WPimport:
else:
bodyhtml = bodyhtml + str(i).replace("", "").replace("
", "").replace("
", "
") + "
"
bodyhtml = bodyhtml + ""
+ for i in list_img:
+ o = urlparse(i["new_src"])
+ if i == True:
+ print(i["old_href"])
+ bodyhtml = bodyhtml.replace(i["old_href"], o.path)
+ else:
+ print(i["old_src"])
+ bodyhtml = bodyhtml.replace(i["old_src"], o.path)
+ print(bodyhtml)
+ exit(0)
hour = articledate[0].text
time = dateheader[0].text.split(" ")
From 90881eb03714db8ab9c6972e0b91087beadc6762 Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Fri, 31 Mar 2023 00:05:11 +0200
Subject: [PATCH 15/60] add media in body html
---
insert_wordpress.py | 24 +++++++++++-------------
1 file changed, 11 insertions(+), 13 deletions(-)
diff --git a/insert_wordpress.py b/insert_wordpress.py
index 780844d..a5a86eb 100644
--- a/insert_wordpress.py
+++ b/insert_wordpress.py
@@ -49,6 +49,8 @@ class WPimport:
href_img = img[0].get("src")
page_img = requests.get(href_img)
img_break = False
+ new_img["old_src"]=href_img
+ new_img["old_href"]=href_a
if page_img.status_code == 404:
href_img = href_a
img_break = True
@@ -70,11 +72,13 @@ class WPimport:
r = requests.post("http://{0}/wp-json/wp/v2/media".format(self.wordpress), auth=self.basic, headers=headers, data=data)
if r.status_code == 201:
res = r.json()
-
- new_img["old_src"]=href_img
- new_img["old_href"]=href_a
- new_img["id"]=res["id"]
- new_img["new_src"]=res["guid"]["rendered"]
+ id_res = res["id"]
+ rendered = res["guid"]["rendered"]
+ else:
+ id_res = res[0]["id"]
+ rendered = res[0]["guid"]["rendered"]
+ new_img["id"]=id_res
+ new_img["new_src"]=rendered
new_img["break"]=img_break
list_img.append(new_img)
@@ -126,14 +130,8 @@ class WPimport:
bodyhtml = bodyhtml + ""
for i in list_img:
o = urlparse(i["new_src"])
- if i == True:
- print(i["old_href"])
- bodyhtml = bodyhtml.replace(i["old_href"], o.path)
- else:
- print(i["old_src"])
- bodyhtml = bodyhtml.replace(i["old_src"], o.path)
- print(bodyhtml)
- exit(0)
+ bodyhtml = bodyhtml.replace(i["old_href"], o.path)
+ bodyhtml = bodyhtml.replace(i["old_src"], o.path)
hour = articledate[0].text
time = dateheader[0].text.split(" ")
From 066d8cae52c771ceeee9d340b4dfedcde8437238 Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Fri, 31 Mar 2023 00:06:47 +0200
Subject: [PATCH 16/60] remove print
---
insert_wordpress.py | 2 --
1 file changed, 2 deletions(-)
diff --git a/insert_wordpress.py b/insert_wordpress.py
index a5a86eb..9e40faa 100644
--- a/insert_wordpress.py
+++ b/insert_wordpress.py
@@ -157,11 +157,9 @@ class WPimport:
if page.status_code == 201:
result = page.json()
print("Article ajoute : {0}".format(result["title"]["raw"]))
- print(comment_post)
for i in comment_post:
data = {"post": result["id"], "content": i["content"], "date": i["date"], "author_name": i["author"]}
page = requests.post("http://{0}/wp-json/wp/v2/comments".format(self.wordpress), auth=self.basic, data=data)
- print(page.status_code)
if page.status_code == 201:
print("Commentaire ajoute pour {0}".format(result["title"]["raw"]))
From e0b4895b62072edd65da8aa560e9d59cca55c342 Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Fri, 31 Mar 2023 00:14:38 +0200
Subject: [PATCH 17/60] association image article
---
insert_wordpress.py | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/insert_wordpress.py b/insert_wordpress.py
index 9e40faa..0f1b2fa 100644
--- a/insert_wordpress.py
+++ b/insert_wordpress.py
@@ -162,6 +162,12 @@ class WPimport:
page = requests.post("http://{0}/wp-json/wp/v2/comments".format(self.wordpress), auth=self.basic, data=data)
if page.status_code == 201:
print("Commentaire ajoute pour {0}".format(result["title"]["raw"]))
+ for i in list_img:
+ data = {"post": result["id"]}
+ r = requests.post("http://{0}/wp-json/wp/v2/media/{1}".format(self.wordpress, i["id"]), auth=self.basic, data=data)
+ if r.status_code == 200:
+ print("Association d'une image à l'article {0}".format(result["title"]["raw"]))
+
From bcb3abce016e4f038c499ccd3170cc792e0135f1 Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Sat, 1 Apr 2023 00:11:33 +0200
Subject: [PATCH 18/60] fix variable
---
insert_wordpress.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/insert_wordpress.py b/insert_wordpress.py
index 0f1b2fa..854c412 100644
--- a/insert_wordpress.py
+++ b/insert_wordpress.py
@@ -110,7 +110,7 @@ class WPimport:
for k in elements[i]:
if k["name"] == j:
element_exist = True
- array = listelement[i].append(k["id"])
+ listelement[i].append(k["id"])
if element_exist is False:
data = {"name": j}
page = requests.post("http://{0}/wp-json/wp/v2/{1}".format(self.wordpress, i), auth=self.basic, data=data)
@@ -145,7 +145,7 @@ class WPimport:
params = {"search":title}
page = requests.get("http://{0}/wp-json/wp/v2/posts".format(self.wordpress), auth=self.basic, params=params)
page_exist = True
- if page.status_code:
+ if page.status_code == 200:
result = page.json()
if len(result) == 0:
page_exist = False
From faa22f1438532d65e104cf9c471b924299f2264e Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Sat, 1 Apr 2023 00:18:56 +0200
Subject: [PATCH 19/60] update post
---
insert_wordpress.py | 9 ++++++++-
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/insert_wordpress.py b/insert_wordpress.py
index 854c412..e327729 100644
--- a/insert_wordpress.py
+++ b/insert_wordpress.py
@@ -150,7 +150,14 @@ class WPimport:
if len(result) == 0:
page_exist = False
else:
- print("La page {0} existe deja".format(title))
+ print("La page {0} existe deja et mis à jour".format(title))
+ post_id = result[0]["id"]
+ page = requests.post("http://{0}/wp-json/wp/v2/posts/{1}".format(self.wordpress, post_id), auth=self.basic, data=data)
+ if page.status_code == 200:
+ result = page.json()
+ print("Article mis à jour : {0}".format(result["title"]["raw"]))
+
+
if page_exist == False:
page = requests.post("http://{0}/wp-json/wp/v2/posts".format(self.wordpress), auth=self.basic, data=data)
From 4054f41e9bf1af96ca6b2a8377ba05d1d0738d8d Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Sat, 1 Apr 2023 18:32:17 +0200
Subject: [PATCH 20/60] add json dumps for post
---
insert_wordpress.py | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/insert_wordpress.py b/insert_wordpress.py
index e327729..662bcda 100644
--- a/insert_wordpress.py
+++ b/insert_wordpress.py
@@ -3,7 +3,7 @@ from bs4 import BeautifulSoup
from urllib.parse import urlparse
from requests.auth import HTTPBasicAuth
from getpass import getpass
-import requests, os, argparse, logging, re
+import requests, os, argparse, logging, re, json
class WPimport:
@@ -134,7 +134,6 @@ class WPimport:
bodyhtml = bodyhtml.replace(i["old_src"], o.path)
hour = articledate[0].text
time = dateheader[0].text.split(" ")
-
data = {"title":title, "content":bodyhtml, "status":"publish", "date": "{0}-{1}-{2}T{3}:00".format(time[2],month[time[1]],time[0], hour), "tags": listelement["tags"], "categories": listelement["categories"]}
params = {"search":author}
page = requests.get("http://{0}/wp-json/wp/v2/users".format(self.wordpress), auth=self.basic, params=params)
@@ -145,6 +144,7 @@ class WPimport:
params = {"search":title}
page = requests.get("http://{0}/wp-json/wp/v2/posts".format(self.wordpress), auth=self.basic, params=params)
page_exist = True
+ headers = {'Content-Type': 'application/json', 'Accept':'application/json'}
if page.status_code == 200:
result = page.json()
if len(result) == 0:
@@ -152,7 +152,7 @@ class WPimport:
else:
print("La page {0} existe deja et mis à jour".format(title))
post_id = result[0]["id"]
- page = requests.post("http://{0}/wp-json/wp/v2/posts/{1}".format(self.wordpress, post_id), auth=self.basic, data=data)
+ page = requests.post("http://{0}/wp-json/wp/v2/posts/{1}".format(self.wordpress, post_id), auth=self.basic, headers=headers, data=json.dumps(data))
if page.status_code == 200:
result = page.json()
print("Article mis à jour : {0}".format(result["title"]["raw"]))
@@ -160,7 +160,7 @@ class WPimport:
if page_exist == False:
- page = requests.post("http://{0}/wp-json/wp/v2/posts".format(self.wordpress), auth=self.basic, data=data)
+ page = requests.post("http://{0}/wp-json/wp/v2/posts".format(self.wordpress), auth=self.basic, headers=headers, data=json.dumps(data))
if page.status_code == 201:
result = page.json()
print("Article ajoute : {0}".format(result["title"]["raw"]))
From f07f8c040f0c0d043a03b2bbd127e612787bddd6 Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Sun, 2 Apr 2023 13:06:10 +0200
Subject: [PATCH 21/60] add private method for association id
---
insert_wordpress.py | 15 +++++++++------
1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/insert_wordpress.py b/insert_wordpress.py
index 662bcda..ad30fd8 100644
--- a/insert_wordpress.py
+++ b/insert_wordpress.py
@@ -15,6 +15,13 @@ class WPimport:
with open(file, 'r') as f:
contents = f.read()
self.insertWordpress(contents)
+
+ def _linkImgPost(self, title, list_img, post_id):
+ for i in list_img:
+ data = {"post": post_id}
+ r = requests.post("http://{0}/wp-json/wp/v2/media/{1}".format(self.wordpress, i["id"]), auth=self.basic, data=data)
+ if r.status_code == 200:
+ print("Association d'une image à l'article {0}".format(title))
def insertWordpress(self, content):
tags = []
@@ -156,6 +163,7 @@ class WPimport:
if page.status_code == 200:
result = page.json()
print("Article mis à jour : {0}".format(result["title"]["raw"]))
+ self._linkImgPost(result["title"]["raw"], list_img, result["id"])
@@ -169,12 +177,7 @@ class WPimport:
page = requests.post("http://{0}/wp-json/wp/v2/comments".format(self.wordpress), auth=self.basic, data=data)
if page.status_code == 201:
print("Commentaire ajoute pour {0}".format(result["title"]["raw"]))
- for i in list_img:
- data = {"post": result["id"]}
- r = requests.post("http://{0}/wp-json/wp/v2/media/{1}".format(self.wordpress, i["id"]), auth=self.basic, data=data)
- if r.status_code == 200:
- print("Association d'une image à l'article {0}".format(result["title"]["raw"]))
-
+ self._linkImgPost(result["title"]["raw"], list_img, result["id"])
From cc33ab34df6256a4ca88c45e18351188a5d953ed Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Sun, 2 Apr 2023 13:14:52 +0200
Subject: [PATCH 22/60] private variable
---
insert_wordpress.py | 28 ++++++++++++++--------------
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/insert_wordpress.py b/insert_wordpress.py
index ad30fd8..897a1d4 100644
--- a/insert_wordpress.py
+++ b/insert_wordpress.py
@@ -8,22 +8,22 @@ import requests, os, argparse, logging, re, json
class WPimport:
def __init__(self, basic, wordpress):
- self.basic = basic
- self.wordpress = wordpress
+ self._basic = basic
+ self._wordpress = wordpress
def fromFile(self, file):
with open(file, 'r') as f:
contents = f.read()
- self.insertWordpress(contents)
+ self._insertWordpress(contents)
def _linkImgPost(self, title, list_img, post_id):
for i in list_img:
data = {"post": post_id}
- r = requests.post("http://{0}/wp-json/wp/v2/media/{1}".format(self.wordpress, i["id"]), auth=self.basic, data=data)
+ r = requests.post("http://{0}/wp-json/wp/v2/media/{1}".format(self._wordpress, i["id"]), auth=self._basic, data=data)
if r.status_code == 200:
print("Association d'une image à l'article {0}".format(title))
- def insertWordpress(self, content):
+ def _insertWordpress(self, content):
tags = []
month = {"janvier":"01", "février": "02", "mars": "03", "avril":"04", "mai": "05", "juin": "06", "juillet": "07", "août": "08", "septembre": "09", "octobre": "10", "novembre": "11", "décembre": "12"}
liste = ["categories", "tags"]
@@ -32,7 +32,7 @@ class WPimport:
listelement = {}
for i in liste:
- page = requests.get("http://{0}/wp-json/wp/v2/{1}".format(self.wordpress,i))
+ page = requests.get("http://{0}/wp-json/wp/v2/{1}".format(self._wordpress,i))
if page.status_code == 200:
elements[i] = page.json()
element[i] = []
@@ -67,7 +67,7 @@ class WPimport:
split_fileimg = href_img.split("/")
img_name = split_fileimg[len(split_fileimg)-1]
params = { "search": img_name}
- r = requests.get("http://{0}/wp-json/wp/v2/media".format(self.wordpress), auth=self.basic, params=params)
+ r = requests.get("http://{0}/wp-json/wp/v2/media".format(self._wordpress), auth=self._basic, params=params)
if r.status_code == 200:
res = r.json()
if len(res) == 0:
@@ -76,7 +76,7 @@ class WPimport:
if img_name.split(".")[1]:
img_type = "image/jpg"
headers={ 'Content-Type': img_type,'Content-Disposition' : 'attachment; filename={0}'.format(img_name)}
- r = requests.post("http://{0}/wp-json/wp/v2/media".format(self.wordpress), auth=self.basic, headers=headers, data=data)
+ r = requests.post("http://{0}/wp-json/wp/v2/media".format(self._wordpress), auth=self._basic, headers=headers, data=data)
if r.status_code == 201:
res = r.json()
id_res = res["id"]
@@ -120,7 +120,7 @@ class WPimport:
listelement[i].append(k["id"])
if element_exist is False:
data = {"name": j}
- page = requests.post("http://{0}/wp-json/wp/v2/{1}".format(self.wordpress, i), auth=self.basic, data=data)
+ page = requests.post("http://{0}/wp-json/wp/v2/{1}".format(self._wordpress, i), auth=self._basic, data=data)
if page.status_code == 201:
result = page.json()
listelement[i].append(result["id"])
@@ -143,13 +143,13 @@ class WPimport:
time = dateheader[0].text.split(" ")
data = {"title":title, "content":bodyhtml, "status":"publish", "date": "{0}-{1}-{2}T{3}:00".format(time[2],month[time[1]],time[0], hour), "tags": listelement["tags"], "categories": listelement["categories"]}
params = {"search":author}
- page = requests.get("http://{0}/wp-json/wp/v2/users".format(self.wordpress), auth=self.basic, params=params)
+ page = requests.get("http://{0}/wp-json/wp/v2/users".format(self._wordpress), auth=self._basic, params=params)
if page.status_code == 200:
result = page.json()
data["author"] = result[0]["id"]
params = {"search":title}
- page = requests.get("http://{0}/wp-json/wp/v2/posts".format(self.wordpress), auth=self.basic, params=params)
+ page = requests.get("http://{0}/wp-json/wp/v2/posts".format(self._wordpress), auth=self._basic, params=params)
page_exist = True
headers = {'Content-Type': 'application/json', 'Accept':'application/json'}
if page.status_code == 200:
@@ -159,7 +159,7 @@ class WPimport:
else:
print("La page {0} existe deja et mis à jour".format(title))
post_id = result[0]["id"]
- page = requests.post("http://{0}/wp-json/wp/v2/posts/{1}".format(self.wordpress, post_id), auth=self.basic, headers=headers, data=json.dumps(data))
+ page = requests.post("http://{0}/wp-json/wp/v2/posts/{1}".format(self._wordpress, post_id), auth=self._basic, headers=headers, data=json.dumps(data))
if page.status_code == 200:
result = page.json()
print("Article mis à jour : {0}".format(result["title"]["raw"]))
@@ -168,13 +168,13 @@ class WPimport:
if page_exist == False:
- page = requests.post("http://{0}/wp-json/wp/v2/posts".format(self.wordpress), auth=self.basic, headers=headers, data=json.dumps(data))
+ page = requests.post("http://{0}/wp-json/wp/v2/posts".format(self._wordpress), auth=self._basic, headers=headers, data=json.dumps(data))
if page.status_code == 201:
result = page.json()
print("Article ajoute : {0}".format(result["title"]["raw"]))
for i in comment_post:
data = {"post": result["id"], "content": i["content"], "date": i["date"], "author_name": i["author"]}
- page = requests.post("http://{0}/wp-json/wp/v2/comments".format(self.wordpress), auth=self.basic, data=data)
+ page = requests.post("http://{0}/wp-json/wp/v2/comments".format(self._wordpress), auth=self._basic, data=data)
if page.status_code == 201:
print("Commentaire ajoute pour {0}".format(result["title"]["raw"]))
self._linkImgPost(result["title"]["raw"], list_img, result["id"])
From 42b7e7e408b73d49e8bd6c19bd75eabe2ee498ed Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Sun, 2 Apr 2023 16:56:07 +0200
Subject: [PATCH 23/60] get featured image for post (WIP)
---
insert_wordpress.py | 42 +++++++++++++++++++++++++++++++++++++-----
1 file changed, 37 insertions(+), 5 deletions(-)
diff --git a/insert_wordpress.py b/insert_wordpress.py
index 897a1d4..fb61e6b 100644
--- a/insert_wordpress.py
+++ b/insert_wordpress.py
@@ -6,24 +6,57 @@ from getpass import getpass
import requests, os, argparse, logging, re, json
class WPimport:
-
+ # Constructor
def __init__(self, basic, wordpress):
self._basic = basic
self._wordpress = wordpress
+ # Public method
+
def fromFile(self, file):
with open(file, 'r') as f:
- contents = f.read()
- self._insertWordpress(contents)
+ content = f.read()
+ soup = BeautifulSoup(content, 'html.parser')
+ articlebody = soup.find_all("div", class_="articlebody")
+ if len(articlebody) > 0:
+ self._addOrUpdatePost(soup)
+ else:
+ self._addOrUpdateFeaturedMedia(soup)
+ # Private method
+
+
+ ## Get or update featured image
+
+ def _addOrUpdateFeaturedMedia(self, soup):
+ item_div = soup.find_all("div", {"data-edittype": "post"})
+ for i in item_div:
+ h2 = i.find_all("h2")[0].text
+ params = {"search":h2, "type":"post"}
+ page = requests.get("http://{0}/wp-json/wp/v2/search".format(self._wordpress), auth=self._basic, params=params)
+ if page.status_code == 200:
+ result = page.json()
+ if len(result) > 0:
+ if h2 == result[0]["title"]:
+ img = i.find_all("img")
+ if len(img) > 0:
+ img_src = img[0].get("src")
+ print(img_src)
+
+
+ ## Association image to post
+
def _linkImgPost(self, title, list_img, post_id):
for i in list_img:
data = {"post": post_id}
r = requests.post("http://{0}/wp-json/wp/v2/media/{1}".format(self._wordpress, i["id"]), auth=self._basic, data=data)
if r.status_code == 200:
print("Association d'une image à l'article {0}".format(title))
+
+
+ ## Add or Update post
- def _insertWordpress(self, content):
+ def _addOrUpdatePost(self, soup):
tags = []
month = {"janvier":"01", "février": "02", "mars": "03", "avril":"04", "mai": "05", "juin": "06", "juillet": "07", "août": "08", "septembre": "09", "octobre": "10", "novembre": "11", "décembre": "12"}
liste = ["categories", "tags"]
@@ -38,7 +71,6 @@ class WPimport:
element[i] = []
listelement[i] = []
- soup = BeautifulSoup(content, 'html.parser')
articletitle = soup.find_all("h2", class_="articletitle")
articlebody = soup.find_all("div", class_="articlebody")
articledate = soup.find_all("span", class_="articledate")
From cb64dd47ab554ac1fe3aa2596a7f2105c82e9620 Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Sun, 2 Apr 2023 17:34:55 +0200
Subject: [PATCH 24/60] create private method for add or update media
---
insert_wordpress.py | 64 ++++++++++++++++++++++++---------------------
1 file changed, 34 insertions(+), 30 deletions(-)
diff --git a/insert_wordpress.py b/insert_wordpress.py
index fb61e6b..43597dc 100644
--- a/insert_wordpress.py
+++ b/insert_wordpress.py
@@ -52,7 +52,32 @@ class WPimport:
r = requests.post("http://{0}/wp-json/wp/v2/media/{1}".format(self._wordpress, i["id"]), auth=self._basic, data=data)
if r.status_code == 200:
print("Association d'une image à l'article {0}".format(title))
-
+
+ ## Add or update img
+
+ def _addOrUpdateMedia(self, href_img):
+ media = {"id":"", "rendered":""}
+ split_fileimg = href_img.split("/")
+ img_name = split_fileimg[len(split_fileimg)-1]
+ params = { "search": img_name}
+ r = requests.get("http://{0}/wp-json/wp/v2/media".format(self._wordpress), auth=self._basic, params=params)
+ if r.status_code == 200:
+ res = r.json()
+ if len(res) == 0:
+ data = page.content
+ img_type = "image/png"
+ if img_name.split(".")[1]:
+ img_type = "image/jpg"
+ headers={ 'Content-Type': img_type,'Content-Disposition' : 'attachment; filename={0}'.format(img_name)}
+ r = requests.post("http://{0}/wp-json/wp/v2/media".format(self._wordpress), auth=self._basic, headers=headers, data=data)
+ if r.status_code == 201:
+ res = r.json()
+ media["id"] = res["id"]
+ media["rendered"] = res["guid"]["rendered"]
+ else:
+ media["id"] = res[0]["id"]
+ media["rendered"] = res[0]["guid"]["rendered"]
+ return media
## Add or Update post
@@ -86,42 +111,21 @@ class WPimport:
if len(img) > 0:
href_a = i.get("href")
href_img = img[0].get("src")
- page_img = requests.get(href_img)
- img_break = False
new_img["old_src"]=href_img
new_img["old_href"]=href_a
+ page_img = requests.get(href_img)
+ img_break = False
if page_img.status_code == 404:
href_img = href_a
img_break = True
- page = requests.get(href_img)
+ page = requests.get(href_a)
if page.status_code == 200:
-
- split_fileimg = href_img.split("/")
- img_name = split_fileimg[len(split_fileimg)-1]
- params = { "search": img_name}
- r = requests.get("http://{0}/wp-json/wp/v2/media".format(self._wordpress), auth=self._basic, params=params)
- if r.status_code == 200:
- res = r.json()
- if len(res) == 0:
- data = page.content
- img_type = "image/png"
- if img_name.split(".")[1]:
- img_type = "image/jpg"
- headers={ 'Content-Type': img_type,'Content-Disposition' : 'attachment; filename={0}'.format(img_name)}
- r = requests.post("http://{0}/wp-json/wp/v2/media".format(self._wordpress), auth=self._basic, headers=headers, data=data)
- if r.status_code == 201:
- res = r.json()
- id_res = res["id"]
- rendered = res["guid"]["rendered"]
- else:
- id_res = res[0]["id"]
- rendered = res[0]["guid"]["rendered"]
- new_img["id"]=id_res
- new_img["new_src"]=rendered
- new_img["break"]=img_break
- list_img.append(new_img)
+ media=self._addOrUpdateMedia(href_img)
+ new_img["id"]=media["id"]
+ new_img["new_src"]=media["rendered"]
+ new_img["break"]=img_break
+ list_img.append(new_img)
-
comment_post = []
for i in comment:
comment_item = i.text.split("\n")
From ec4135c5d024f72fdacee10bcf321b5db13d2fae Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Sun, 2 Apr 2023 17:36:17 +0200
Subject: [PATCH 25/60] fix condition type file
---
insert_wordpress.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/insert_wordpress.py b/insert_wordpress.py
index 43597dc..32f3439 100644
--- a/insert_wordpress.py
+++ b/insert_wordpress.py
@@ -66,7 +66,7 @@ class WPimport:
if len(res) == 0:
data = page.content
img_type = "image/png"
- if img_name.split(".")[1]:
+ if img_name.split(".")[1] == "jpg" or img_name.split(".")[1] == "jpeg":
img_type = "image/jpg"
headers={ 'Content-Type': img_type,'Content-Disposition' : 'attachment; filename={0}'.format(img_name)}
r = requests.post("http://{0}/wp-json/wp/v2/media".format(self._wordpress), auth=self._basic, headers=headers, data=data)
From 1e162662e65fb7adf500f6763b451b3869f8e16c Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Sun, 2 Apr 2023 17:51:54 +0200
Subject: [PATCH 26/60] add featured media
---
insert_wordpress.py | 12 +++++++++---
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/insert_wordpress.py b/insert_wordpress.py
index 32f3439..a9e71d9 100644
--- a/insert_wordpress.py
+++ b/insert_wordpress.py
@@ -41,7 +41,13 @@ class WPimport:
img = i.find_all("img")
if len(img) > 0:
img_src = img[0].get("src")
- print(img_src)
+ page = requests.get(img_src)
+ if page.status_code == 200:
+ media=self._addOrUpdateMedia(img_src, page)
+ data = {"featured_media": media["id"]}
+ r = requests.post("http://{0}/wp-json/wp/v2/posts/{1}".format(self._wordpress, result[0]["id"]), auth=self._basic, data=json.dumps(data))
+ if r.status_code == 200:
+ print("Ajout media featured : {0}".format(r.json()["title"]["raw"]))
## Association image to post
@@ -55,7 +61,7 @@ class WPimport:
## Add or update img
- def _addOrUpdateMedia(self, href_img):
+ def _addOrUpdateMedia(self, href_img, page):
media = {"id":"", "rendered":""}
split_fileimg = href_img.split("/")
img_name = split_fileimg[len(split_fileimg)-1]
@@ -120,7 +126,7 @@ class WPimport:
img_break = True
page = requests.get(href_a)
if page.status_code == 200:
- media=self._addOrUpdateMedia(href_img)
+ media=self._addOrUpdateMedia(href_img, page)
new_img["id"]=media["id"]
new_img["new_src"]=media["rendered"]
new_img["break"]=img_break
From f77274f00e51073d5fbba2dd3f096ffcfe58d5ff Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Sun, 2 Apr 2023 17:56:22 +0200
Subject: [PATCH 27/60] add headers json
---
insert_wordpress.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/insert_wordpress.py b/insert_wordpress.py
index a9e71d9..7db1fd1 100644
--- a/insert_wordpress.py
+++ b/insert_wordpress.py
@@ -44,8 +44,9 @@ class WPimport:
page = requests.get(img_src)
if page.status_code == 200:
media=self._addOrUpdateMedia(img_src, page)
+ headers = {'Content-Type': 'application/json', 'Accept':'application/json'}
data = {"featured_media": media["id"]}
- r = requests.post("http://{0}/wp-json/wp/v2/posts/{1}".format(self._wordpress, result[0]["id"]), auth=self._basic, data=json.dumps(data))
+ r = requests.post("http://{0}/wp-json/wp/v2/posts/{1}".format(self._wordpress, result[0]["id"]), auth=self._basic, headers=headers, data=json.dumps(data))
if r.status_code == 200:
print("Ajout media featured : {0}".format(r.json()["title"]["raw"]))
From c9b1264153abf4b6ec46c554eae039abec07b452 Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Sun, 2 Apr 2023 18:01:57 +0200
Subject: [PATCH 28/60] remove private method for featured media
---
insert_wordpress.py | 30 +-----------------------------
1 file changed, 1 insertion(+), 29 deletions(-)
diff --git a/insert_wordpress.py b/insert_wordpress.py
index 7db1fd1..fff8279 100644
--- a/insert_wordpress.py
+++ b/insert_wordpress.py
@@ -20,36 +20,8 @@ class WPimport:
articlebody = soup.find_all("div", class_="articlebody")
if len(articlebody) > 0:
self._addOrUpdatePost(soup)
- else:
- self._addOrUpdateFeaturedMedia(soup)
# Private method
-
-
- ## Get or update featured image
-
- def _addOrUpdateFeaturedMedia(self, soup):
- item_div = soup.find_all("div", {"data-edittype": "post"})
- for i in item_div:
- h2 = i.find_all("h2")[0].text
- params = {"search":h2, "type":"post"}
- page = requests.get("http://{0}/wp-json/wp/v2/search".format(self._wordpress), auth=self._basic, params=params)
- if page.status_code == 200:
- result = page.json()
- if len(result) > 0:
- if h2 == result[0]["title"]:
- img = i.find_all("img")
- if len(img) > 0:
- img_src = img[0].get("src")
- page = requests.get(img_src)
- if page.status_code == 200:
- media=self._addOrUpdateMedia(img_src, page)
- headers = {'Content-Type': 'application/json', 'Accept':'application/json'}
- data = {"featured_media": media["id"]}
- r = requests.post("http://{0}/wp-json/wp/v2/posts/{1}".format(self._wordpress, result[0]["id"]), auth=self._basic, headers=headers, data=json.dumps(data))
- if r.status_code == 200:
- print("Ajout media featured : {0}".format(r.json()["title"]["raw"]))
-
## Association image to post
@@ -184,7 +156,7 @@ class WPimport:
bodyhtml = bodyhtml.replace(i["old_src"], o.path)
hour = articledate[0].text
time = dateheader[0].text.split(" ")
- data = {"title":title, "content":bodyhtml, "status":"publish", "date": "{0}-{1}-{2}T{3}:00".format(time[2],month[time[1]],time[0], hour), "tags": listelement["tags"], "categories": listelement["categories"]}
+ data = {"title":title, "content":bodyhtml, "status":"publish", "date": "{0}-{1}-{2}T{3}:00".format(time[2],month[time[1]],time[0], hour), "tags": listelement["tags"], "categories": listelement["categories"], "featured_media":list_img[0]["id"]}
params = {"search":author}
page = requests.get("http://{0}/wp-json/wp/v2/users".format(self._wordpress), auth=self._basic, params=params)
if page.status_code == 200:
From 501876dac23893a7823b70071e2776d255b9ced8 Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Mon, 3 Apr 2023 23:45:48 +0200
Subject: [PATCH 29/60] add or update featured media
---
insert_wordpress.py | 54 +++++++++++++++++++++++++++++++++++++++------
1 file changed, 47 insertions(+), 7 deletions(-)
diff --git a/insert_wordpress.py b/insert_wordpress.py
index fff8279..06185fe 100644
--- a/insert_wordpress.py
+++ b/insert_wordpress.py
@@ -20,8 +20,46 @@ class WPimport:
articlebody = soup.find_all("div", class_="articlebody")
if len(articlebody) > 0:
self._addOrUpdatePost(soup)
+ else:
+ self._addOrUpdateFeaturedMedia(soup)
# Private method
+
+ ## Add or update featured media
+
+
+ ## Get or update featured image
+
+ def _addOrUpdateFeaturedMedia(self, soup):
+ item_div = soup.find_all("div", {"data-edittype": "post"})
+ for i in item_div:
+ h2 = i.find_all("h2")[0].text
+ params = {"search":h2, "type":"post"}
+ page = requests.get("http://{0}/wp-json/wp/v2/search".format(self._wordpress), auth=self._basic, params=params)
+ if page.status_code == 200:
+ result = page.json()
+ if len(result) > 0:
+ if h2 == result[0]["title"]:
+ img = i.find_all("img")
+ if len(img) > 0:
+ img_src = img[0].get("src")
+ page = requests.get(img_src)
+ if page.status_code == 200:
+ name_img = img_src.replace("_q", "")
+ name_img = name_img.split("/")[len(name_img.split("/"))-1]
+ params = {"search": name_img}
+ page = requests.get("http://{0}/wp-json/wp/v2/media".format(self._wordpress), auth=self._basic, params=params)
+ if page.status_code == 200:
+ res = page.json()
+ if len(res) > 0:
+ id_media = res[0]["id"]
+ headers = {'Content-Type': 'application/json', 'Accept':'application/json'}
+ data = {"featured_media": id_media}
+ r = requests.post("http://{0}/wp-json/wp/v2/posts/{1}".format(self._wordpress, result[0]["id"]), auth=self._basic, headers=headers, data=json.dumps(data))
+ if r.status_code == 200:
+ print("Ajout media featured : {0}".format(r.json()["title"]["raw"]))
+ else:
+ print("Aucun media trouvé")
## Association image to post
@@ -93,17 +131,19 @@ class WPimport:
new_img["old_src"]=href_img
new_img["old_href"]=href_a
page_img = requests.get(href_img)
- img_break = False
if page_img.status_code == 404:
href_img = href_a
- img_break = True
- page = requests.get(href_a)
- if page.status_code == 200:
- media=self._addOrUpdateMedia(href_img, page)
+ page_img = requests.get(href_a)
+ if page_img.status_code == 200:
+ media=self._addOrUpdateMedia(href_img, page_img)
new_img["id"]=media["id"]
new_img["new_src"]=media["rendered"]
- new_img["break"]=img_break
list_img.append(new_img)
+ if href_img != href_a:
+ media=self._addOrUpdateMedia(href_a, page_img)
+ new_img["id"]=media["id"]
+ new_img["new_src"]=media["rendered"]
+ list_img.append(new_img)
comment_post = []
for i in comment:
@@ -156,7 +196,7 @@ class WPimport:
bodyhtml = bodyhtml.replace(i["old_src"], o.path)
hour = articledate[0].text
time = dateheader[0].text.split(" ")
- data = {"title":title, "content":bodyhtml, "status":"publish", "date": "{0}-{1}-{2}T{3}:00".format(time[2],month[time[1]],time[0], hour), "tags": listelement["tags"], "categories": listelement["categories"], "featured_media":list_img[0]["id"]}
+ data = {"title":title, "content":bodyhtml, "status":"publish", "date": "{0}-{1}-{2}T{3}:00".format(time[2],month[time[1]],time[0], hour), "tags": listelement["tags"], "categories": listelement["categories"]}
params = {"search":author}
page = requests.get("http://{0}/wp-json/wp/v2/users".format(self._wordpress), auth=self._basic, params=params)
if page.status_code == 200:
From 404ad5dd6cd3ebd5ce5ca7aa27385833b8885f81 Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Tue, 4 Apr 2023 00:00:28 +0200
Subject: [PATCH 30/60] update image not fix
---
insert_wordpress.py | 28 ++++++++++++++--------------
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/insert_wordpress.py b/insert_wordpress.py
index 06185fe..ba799d3 100644
--- a/insert_wordpress.py
+++ b/insert_wordpress.py
@@ -80,20 +80,20 @@ class WPimport:
r = requests.get("http://{0}/wp-json/wp/v2/media".format(self._wordpress), auth=self._basic, params=params)
if r.status_code == 200:
res = r.json()
- if len(res) == 0:
- data = page.content
- img_type = "image/png"
- if img_name.split(".")[1] == "jpg" or img_name.split(".")[1] == "jpeg":
- img_type = "image/jpg"
- headers={ 'Content-Type': img_type,'Content-Disposition' : 'attachment; filename={0}'.format(img_name)}
- r = requests.post("http://{0}/wp-json/wp/v2/media".format(self._wordpress), auth=self._basic, headers=headers, data=data)
- if r.status_code == 201:
- res = r.json()
- media["id"] = res["id"]
- media["rendered"] = res["guid"]["rendered"]
- else:
- media["id"] = res[0]["id"]
- media["rendered"] = res[0]["guid"]["rendered"]
+ url = "http://{0}/wp-json/wp/v2/media".format(self._wordpress)
+ if len(res) > 0:
+ url = "http://{0}/wp-json/wp/v2/media/{1}".format(self._wordpress, res[0]["id"])
+ print(url)
+ data = page.content
+ img_type = "image/png"
+ if img_name.split(".")[1] == "jpg" or img_name.split(".")[1] == "jpeg":
+ img_type = "image/jpg"
+ headers={ 'Content-Type': img_type,'Content-Disposition' : 'attachment; filename={0}'.format(img_name)}
+ r = requests.post(url, auth=self._basic, headers=headers, data=data)
+ if r.status_code == 201 or r.status_code == 200:
+ res = r.json()
+ media["id"] = res["id"]
+ media["rendered"] = res["guid"]["rendered"]
return media
## Add or Update post
From 665f1474f27b15e19d35607f7071f68d45d702e4 Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Tue, 4 Apr 2023 22:07:36 +0200
Subject: [PATCH 31/60] delete and replace image
---
insert_wordpress.py | 13 +++++++------
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/insert_wordpress.py b/insert_wordpress.py
index ba799d3..fd878cd 100644
--- a/insert_wordpress.py
+++ b/insert_wordpress.py
@@ -59,7 +59,7 @@ class WPimport:
if r.status_code == 200:
print("Ajout media featured : {0}".format(r.json()["title"]["raw"]))
else:
- print("Aucun media trouvé")
+ print("Aucun media trouvé pour {0}".format(h2))
## Association image to post
@@ -80,17 +80,18 @@ class WPimport:
r = requests.get("http://{0}/wp-json/wp/v2/media".format(self._wordpress), auth=self._basic, params=params)
if r.status_code == 200:
res = r.json()
- url = "http://{0}/wp-json/wp/v2/media".format(self._wordpress)
if len(res) > 0:
- url = "http://{0}/wp-json/wp/v2/media/{1}".format(self._wordpress, res[0]["id"])
- print(url)
+ params = {"force":1}
+ r = requests.delete("http://{0}/wp-json/wp/v2/media/{1}".format(self._wordpress, res[0]["id"]), auth=self._basic, params=params)
+ if r.status_code == 200:
+ print("Image supprimé {0}".format(img_name))
data = page.content
img_type = "image/png"
if img_name.split(".")[1] == "jpg" or img_name.split(".")[1] == "jpeg":
img_type = "image/jpg"
headers={ 'Content-Type': img_type,'Content-Disposition' : 'attachment; filename={0}'.format(img_name)}
- r = requests.post(url, auth=self._basic, headers=headers, data=data)
- if r.status_code == 201 or r.status_code == 200:
+ r = requests.post("http://{0}/wp-json/wp/v2/media".format(self._wordpress), auth=self._basic, headers=headers, data=data)
+ if r.status_code == 201:
res = r.json()
media["id"] = res["id"]
media["rendered"] = res["guid"]["rendered"]
From ba511bc6c4251b7987712481a4ad98a2737d31e4 Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Tue, 4 Apr 2023 22:14:10 +0200
Subject: [PATCH 32/60] Ajout print image ajoute
---
insert_wordpress.py | 23 +++++++++++------------
1 file changed, 11 insertions(+), 12 deletions(-)
diff --git a/insert_wordpress.py b/insert_wordpress.py
index fd878cd..1f6812c 100644
--- a/insert_wordpress.py
+++ b/insert_wordpress.py
@@ -13,23 +13,21 @@ class WPimport:
# Public method
- def fromFile(self, file):
- with open(file, 'r') as f:
- content = f.read()
- soup = BeautifulSoup(content, 'html.parser')
- articlebody = soup.find_all("div", class_="articlebody")
- if len(articlebody) > 0:
- self._addOrUpdatePost(soup)
- else:
- self._addOrUpdateFeaturedMedia(soup)
+ def fromFile(self, files):
+ for file in files.split(","):
+ with open(file, 'r') as f:
+ content = f.read()
+ soup = BeautifulSoup(content, 'html.parser')
+ articlebody = soup.find_all("div", class_="articlebody")
+ if len(articlebody) > 0:
+ self._addOrUpdatePost(soup)
+ else:
+ self._addOrUpdateFeaturedMedia(soup)
# Private method
## Add or update featured media
-
- ## Get or update featured image
-
def _addOrUpdateFeaturedMedia(self, soup):
item_div = soup.find_all("div", {"data-edittype": "post"})
for i in item_div:
@@ -92,6 +90,7 @@ class WPimport:
headers={ 'Content-Type': img_type,'Content-Disposition' : 'attachment; filename={0}'.format(img_name)}
r = requests.post("http://{0}/wp-json/wp/v2/media".format(self._wordpress), auth=self._basic, headers=headers, data=data)
if r.status_code == 201:
+ print("Ajout d'image {0}".format(img_name))
res = r.json()
media["id"] = res["id"]
media["rendered"] = res["guid"]["rendered"]
From 5768b37cd1fa5ad21ae26c2d3c0c4e0d354692a0 Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Thu, 6 Apr 2023 20:59:11 +0200
Subject: [PATCH 33/60] ajout de verification de existence fichier
---
insert_wordpress.py | 17 +++++++++--------
1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/insert_wordpress.py b/insert_wordpress.py
index 1f6812c..2c3828e 100644
--- a/insert_wordpress.py
+++ b/insert_wordpress.py
@@ -15,14 +15,15 @@ class WPimport:
def fromFile(self, files):
for file in files.split(","):
- with open(file, 'r') as f:
- content = f.read()
- soup = BeautifulSoup(content, 'html.parser')
- articlebody = soup.find_all("div", class_="articlebody")
- if len(articlebody) > 0:
- self._addOrUpdatePost(soup)
- else:
- self._addOrUpdateFeaturedMedia(soup)
+ if os.path.exists(file):
+ with open(file, 'r') as f:
+ content = f.read()
+ soup = BeautifulSoup(content, 'html.parser')
+ articlebody = soup.find_all("div", class_="articlebody")
+ if len(articlebody) > 0:
+ self._addOrUpdatePost(soup)
+ else:
+ self._addOrUpdateFeaturedMedia(soup)
# Private method
From 1f7e442d04c1e8092a708e2cc12e4ad1606d5efb Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Thu, 6 Apr 2023 21:53:56 +0200
Subject: [PATCH 34/60] wip directory
---
insert_wordpress.py | 21 +++++++++++++++++++--
1 file changed, 19 insertions(+), 2 deletions(-)
diff --git a/insert_wordpress.py b/insert_wordpress.py
index 2c3828e..c91964b 100644
--- a/insert_wordpress.py
+++ b/insert_wordpress.py
@@ -13,6 +13,13 @@ class WPimport:
# Public method
+ def fromDirectory(self, directory):
+ directory = "{0}/archives".format(directory)
+ for item in os.listdir(directory):
+ subdirectory = self._checkDirectory(item)
+ print(subdirectory)
+
+
def fromFile(self, files):
for file in files.split(","):
if os.path.exists(file):
@@ -27,6 +34,11 @@ class WPimport:
# Private method
+ def _checkDirectory(self, item):
+ if os.path.isdir(item):
+ self._checkDirectory(item)
+ return item
+
## Add or update featured media
def _addOrUpdateFeaturedMedia(self, soup):
@@ -243,7 +255,8 @@ class WPimport:
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--user", help="wordpress user", required=True)
- parser.add_argument("--file", help="HTML file", required=True)
+ parser.add_argument("--file", help="HTML file", default="")
+ parser.add_argument("--directory", help="HTML directory", default="")
parser.add_argument("--wordpress", help="URL Wordpress", required=True)
args = parser.parse_args()
password = getpass()
@@ -253,4 +266,8 @@ if __name__ == '__main__':
basic = HTTPBasicAuth(args.user, password)
importWp = WPimport(basic, args.wordpress)
- importWp.fromFile(args.file)
\ No newline at end of file
+ if len(args.file) > 0:
+ importWp.fromFile(args.file)
+ exit(0)
+ if len(args.directory) > 0:
+ importWp.fromDirectory(args.directory)
\ No newline at end of file
From 34115a3a7d89e9404c0c465a42b539ffe445b853 Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Fri, 7 Apr 2023 22:38:34 +0200
Subject: [PATCH 35/60] recursive functions for directories
---
insert_wordpress.py | 16 +++++++++-------
1 file changed, 9 insertions(+), 7 deletions(-)
diff --git a/insert_wordpress.py b/insert_wordpress.py
index c91964b..db2e8a0 100644
--- a/insert_wordpress.py
+++ b/insert_wordpress.py
@@ -15,9 +15,8 @@ class WPimport:
def fromDirectory(self, directory):
directory = "{0}/archives".format(directory)
- for item in os.listdir(directory):
- subdirectory = self._checkDirectory(item)
- print(subdirectory)
+ subdirectory = self._checkDirectory([], "{0}".format(directory))
+ print(subdirectory)
def fromFile(self, files):
@@ -34,10 +33,13 @@ class WPimport:
# Private method
- def _checkDirectory(self, item):
- if os.path.isdir(item):
- self._checkDirectory(item)
- return item
+ def _checkDirectory(self, subdirectory, item):
+ sub = subdirectory
+ for i in os.listdir(item):
+ if os.path.isdir("{0}/{1}".format(item, i)):
+ sub.append("{0}/{1}".format(item, i))
+ subdirectory = self._checkDirectory(sub, "{0}/{1}".format(item, i))
+ return subdirectory
## Add or update featured media
From 9ab33c169eecfe246d3b2616df8f6b74bb852919 Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Fri, 7 Apr 2023 22:55:27 +0200
Subject: [PATCH 36/60] add directory parameter
---
insert_wordpress.py | 28 +++++++++++++++++++++-------
1 file changed, 21 insertions(+), 7 deletions(-)
diff --git a/insert_wordpress.py b/insert_wordpress.py
index db2e8a0..2c29e80 100644
--- a/insert_wordpress.py
+++ b/insert_wordpress.py
@@ -15,13 +15,15 @@ class WPimport:
def fromDirectory(self, directory):
directory = "{0}/archives".format(directory)
- subdirectory = self._checkDirectory([], "{0}".format(directory))
- print(subdirectory)
-
+ directories = self._getDirectories([], "{0}".format(directory))
+ files = self._getFiles(directories)
+ self.fromFile(files)
+
def fromFile(self, files):
- for file in files.split(","):
+ for file in files:
if os.path.exists(file):
+ print(file)
with open(file, 'r') as f:
content = f.read()
soup = BeautifulSoup(content, 'html.parser')
@@ -33,12 +35,24 @@ class WPimport:
# Private method
- def _checkDirectory(self, subdirectory, item):
+ ## Get all files
+
+ def _getFiles(self, item):
+ files = []
+ for i in item:
+ for j in os.listdir(i):
+ if os.path.isfile("{0}/{1}".format(i, j)):
+ files.append("{0}/{1}".format(i, j))
+ return files
+
+ ## Get directories
+
+ def _getDirectories(self, subdirectory, item):
sub = subdirectory
for i in os.listdir(item):
if os.path.isdir("{0}/{1}".format(item, i)):
sub.append("{0}/{1}".format(item, i))
- subdirectory = self._checkDirectory(sub, "{0}/{1}".format(item, i))
+ subdirectory = self._getDirectories(sub, "{0}/{1}".format(item, i))
return subdirectory
## Add or update featured media
@@ -269,7 +283,7 @@ if __name__ == '__main__':
basic = HTTPBasicAuth(args.user, password)
importWp = WPimport(basic, args.wordpress)
if len(args.file) > 0:
- importWp.fromFile(args.file)
+ importWp.fromFile(args.file.split(","))
exit(0)
if len(args.directory) > 0:
importWp.fromDirectory(args.directory)
\ No newline at end of file
From d58ead52b2a157e8a0743a185463e17a4334f800 Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Sat, 8 Apr 2023 12:17:43 +0200
Subject: [PATCH 37/60] replace print by logger
---
insert_wordpress.py | 55 ++++++++++++++++++++++++++++++++++-----------
1 file changed, 42 insertions(+), 13 deletions(-)
diff --git a/insert_wordpress.py b/insert_wordpress.py
index 2c29e80..2fc36fa 100644
--- a/insert_wordpress.py
+++ b/insert_wordpress.py
@@ -7,9 +7,10 @@ import requests, os, argparse, logging, re, json
class WPimport:
# Constructor
- def __init__(self, basic, wordpress):
+ def __init__(self, basic, wordpress, logger):
self._basic = basic
self._wordpress = wordpress
+ self._logger = logger
# Public method
@@ -23,7 +24,7 @@ class WPimport:
def fromFile(self, files):
for file in files:
if os.path.exists(file):
- print(file)
+ logger.info("Fichier en cours de traitement : {0}".format(file))
with open(file, 'r') as f:
content = f.read()
soup = BeautifulSoup(content, 'html.parser')
@@ -84,9 +85,9 @@ class WPimport:
data = {"featured_media": id_media}
r = requests.post("http://{0}/wp-json/wp/v2/posts/{1}".format(self._wordpress, result[0]["id"]), auth=self._basic, headers=headers, data=json.dumps(data))
if r.status_code == 200:
- print("Ajout media featured : {0}".format(r.json()["title"]["raw"]))
+ logger.info("Ajout media featured : {0}".format(r.json()["title"]["raw"]))
else:
- print("Aucun media trouvé pour {0}".format(h2))
+ logger.info("Aucun media trouvé pour {0}".format(h2))
## Association image to post
@@ -95,7 +96,7 @@ class WPimport:
data = {"post": post_id}
r = requests.post("http://{0}/wp-json/wp/v2/media/{1}".format(self._wordpress, i["id"]), auth=self._basic, data=data)
if r.status_code == 200:
- print("Association d'une image à l'article {0}".format(title))
+ logger.info("Association d'une image à l'article {0}".format(title))
## Add or update img
@@ -111,7 +112,7 @@ class WPimport:
params = {"force":1}
r = requests.delete("http://{0}/wp-json/wp/v2/media/{1}".format(self._wordpress, res[0]["id"]), auth=self._basic, params=params)
if r.status_code == 200:
- print("Image supprimé {0}".format(img_name))
+ logger.info("Image supprimé {0}".format(img_name))
data = page.content
img_type = "image/png"
if img_name.split(".")[1] == "jpg" or img_name.split(".")[1] == "jpeg":
@@ -119,7 +120,7 @@ class WPimport:
headers={ 'Content-Type': img_type,'Content-Disposition' : 'attachment; filename={0}'.format(img_name)}
r = requests.post("http://{0}/wp-json/wp/v2/media".format(self._wordpress), auth=self._basic, headers=headers, data=data)
if r.status_code == 201:
- print("Ajout d'image {0}".format(img_name))
+ logger.info("Ajout d'image {0}".format(img_name))
res = r.json()
media["id"] = res["id"]
media["rendered"] = res["guid"]["rendered"]
@@ -241,12 +242,12 @@ class WPimport:
if len(result) == 0:
page_exist = False
else:
- print("La page {0} existe deja et mis à jour".format(title))
+ logger.info("La page {0} existe deja et mis à jour".format(title))
post_id = result[0]["id"]
page = requests.post("http://{0}/wp-json/wp/v2/posts/{1}".format(self._wordpress, post_id), auth=self._basic, headers=headers, data=json.dumps(data))
if page.status_code == 200:
result = page.json()
- print("Article mis à jour : {0}".format(result["title"]["raw"]))
+ logger.info("Article mis à jour : {0}".format(result["title"]["raw"]))
self._linkImgPost(result["title"]["raw"], list_img, result["id"])
@@ -255,12 +256,12 @@ class WPimport:
page = requests.post("http://{0}/wp-json/wp/v2/posts".format(self._wordpress), auth=self._basic, headers=headers, data=json.dumps(data))
if page.status_code == 201:
result = page.json()
- print("Article ajoute : {0}".format(result["title"]["raw"]))
+ logger.info("Article ajoute : {0}".format(result["title"]["raw"]))
for i in comment_post:
data = {"post": result["id"], "content": i["content"], "date": i["date"], "author_name": i["author"]}
page = requests.post("http://{0}/wp-json/wp/v2/comments".format(self._wordpress), auth=self._basic, data=data)
if page.status_code == 201:
- print("Commentaire ajoute pour {0}".format(result["title"]["raw"]))
+ logger.info("Commentaire ajoute pour {0}".format(result["title"]["raw"]))
self._linkImgPost(result["title"]["raw"], list_img, result["id"])
@@ -274,14 +275,42 @@ if __name__ == '__main__':
parser.add_argument("--file", help="HTML file", default="")
parser.add_argument("--directory", help="HTML directory", default="")
parser.add_argument("--wordpress", help="URL Wordpress", required=True)
+ parser.add_argument("--debug", help="Verbosity", action="store_true")
+ parser.add_argument("--logfile", help="Log file", default="")
+ parser.add_argument("--quiet", help="No console output", action="store_true")
+
args = parser.parse_args()
+ logger = logging.getLogger('insert wordpress')
+ formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+
+ if args.quiet is False:
+ ch = logging.StreamHandler()
+ if args.debug is True:
+ logger.setLevel(logging.DEBUG)
+ ch.setLevel(logging.DEBUG)
+ else:
+ logger.setLevel(logging.INFO)
+ ch.setLevel(logging.INFO)
+ ch.setFormatter(formatter)
+ logger.addHandler(ch)
+
+
+ if len(args.logfile) > 0:
+ fileHandler = logging.FileHandler(args.logfile)
+ if args.debug is True:
+ fileHandler.setLevel(logging.DEBUG)
+ else:
+ fileHandler.setLevel(logging.INFO)
+ fileHandler.setFormatter(formatter)
+ logger.addHandler(fileHandler)
+
password = getpass()
if len(password) == 0:
- print("No password error !!! ")
+ logger.error("No password error !!! ")
exit(1)
basic = HTTPBasicAuth(args.user, password)
- importWp = WPimport(basic, args.wordpress)
+ importWp = WPimport(basic, args.wordpress, logger)
if len(args.file) > 0:
importWp.fromFile(args.file.split(","))
exit(0)
From 6f7504e669897443a06dfd880db736f33bb048fa Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Sat, 8 Apr 2023 12:27:30 +0200
Subject: [PATCH 38/60] separate file class
---
WPImport.py | 268 ++++++++++++++++++++++++++++++++++++++++++++
insert_wordpress.py | 268 +-------------------------------------------
2 files changed, 270 insertions(+), 266 deletions(-)
create mode 100644 WPImport.py
diff --git a/WPImport.py b/WPImport.py
new file mode 100644
index 0000000..9b4691c
--- /dev/null
+++ b/WPImport.py
@@ -0,0 +1,268 @@
+#!/usr/bin/python3
+
+from bs4 import BeautifulSoup
+from urllib.parse import urlparse
+import requests, os, logging, re, json
+
+class WPimport:
+ # Constructor
+ def __init__(self, basic, wordpress, logger):
+ self._basic = basic
+ self._wordpress = wordpress
+ self._logger = logger
+
+ # Public method
+
+ def fromDirectory(self, directory):
+ directory = "{0}/archives".format(directory)
+ directories = self._getDirectories([], "{0}".format(directory))
+ files = self._getFiles(directories)
+ self.fromFile(files)
+
+
+ def fromFile(self, files):
+ for file in files:
+ if os.path.exists(file):
+ logger.info("Fichier en cours de traitement : {0}".format(file))
+ with open(file, 'r') as f:
+ content = f.read()
+ soup = BeautifulSoup(content, 'html.parser')
+ articlebody = soup.find_all("div", class_="articlebody")
+ if len(articlebody) > 0:
+ self._addOrUpdatePost(soup)
+ else:
+ self._addOrUpdateFeaturedMedia(soup)
+
+ # Private method
+
+ ## Get all files
+
+ def _getFiles(self, item):
+ files = []
+ for i in item:
+ for j in os.listdir(i):
+ if os.path.isfile("{0}/{1}".format(i, j)):
+ files.append("{0}/{1}".format(i, j))
+ return files
+
+ ## Get directories
+
+ def _getDirectories(self, subdirectory, item):
+ sub = subdirectory
+ for i in os.listdir(item):
+ if os.path.isdir("{0}/{1}".format(item, i)):
+ sub.append("{0}/{1}".format(item, i))
+ subdirectory = self._getDirectories(sub, "{0}/{1}".format(item, i))
+ return subdirectory
+
+ ## Add or update featured media
+
+ def _addOrUpdateFeaturedMedia(self, soup):
+ item_div = soup.find_all("div", {"data-edittype": "post"})
+ for i in item_div:
+ h2 = i.find_all("h2")[0].text
+ params = {"search":h2, "type":"post"}
+ page = requests.get("http://{0}/wp-json/wp/v2/search".format(self._wordpress), auth=self._basic, params=params)
+ if page.status_code == 200:
+ result = page.json()
+ if len(result) > 0:
+ if h2 == result[0]["title"]:
+ img = i.find_all("img")
+ if len(img) > 0:
+ img_src = img[0].get("src")
+ page = requests.get(img_src)
+ if page.status_code == 200:
+ name_img = img_src.replace("_q", "")
+ name_img = name_img.split("/")[len(name_img.split("/"))-1]
+ params = {"search": name_img}
+ page = requests.get("http://{0}/wp-json/wp/v2/media".format(self._wordpress), auth=self._basic, params=params)
+ if page.status_code == 200:
+ res = page.json()
+ if len(res) > 0:
+ id_media = res[0]["id"]
+ headers = {'Content-Type': 'application/json', 'Accept':'application/json'}
+ data = {"featured_media": id_media}
+ r = requests.post("http://{0}/wp-json/wp/v2/posts/{1}".format(self._wordpress, result[0]["id"]), auth=self._basic, headers=headers, data=json.dumps(data))
+ if r.status_code == 200:
+ logger.info("Ajout media featured : {0}".format(r.json()["title"]["raw"]))
+ else:
+ logger.info("Aucun media trouvé pour {0}".format(h2))
+
+ ## Association image to post
+
+ def _linkImgPost(self, title, list_img, post_id):
+ for i in list_img:
+ data = {"post": post_id}
+ r = requests.post("http://{0}/wp-json/wp/v2/media/{1}".format(self._wordpress, i["id"]), auth=self._basic, data=data)
+ if r.status_code == 200:
+ logger.info("Association d'une image à l'article {0}".format(title))
+
+ ## Add or update img
+
+ def _addOrUpdateMedia(self, href_img, page):
+ media = {"id":"", "rendered":""}
+ split_fileimg = href_img.split("/")
+ img_name = split_fileimg[len(split_fileimg)-1]
+ params = { "search": img_name}
+ r = requests.get("http://{0}/wp-json/wp/v2/media".format(self._wordpress), auth=self._basic, params=params)
+ if r.status_code == 200:
+ res = r.json()
+ if len(res) > 0:
+ params = {"force":1}
+ r = requests.delete("http://{0}/wp-json/wp/v2/media/{1}".format(self._wordpress, res[0]["id"]), auth=self._basic, params=params)
+ if r.status_code == 200:
+ logger.info("Image supprimé {0}".format(img_name))
+ data = page.content
+ img_type = "image/png"
+ if img_name.split(".")[1] == "jpg" or img_name.split(".")[1] == "jpeg":
+ img_type = "image/jpg"
+ headers={ 'Content-Type': img_type,'Content-Disposition' : 'attachment; filename={0}'.format(img_name)}
+ r = requests.post("http://{0}/wp-json/wp/v2/media".format(self._wordpress), auth=self._basic, headers=headers, data=data)
+ if r.status_code == 201:
+ logger.info("Ajout d'image {0}".format(img_name))
+ res = r.json()
+ media["id"] = res["id"]
+ media["rendered"] = res["guid"]["rendered"]
+ return media
+
+ ## Add or Update post
+
+ def _addOrUpdatePost(self, soup):
+ tags = []
+ month = {"janvier":"01", "février": "02", "mars": "03", "avril":"04", "mai": "05", "juin": "06", "juillet": "07", "août": "08", "septembre": "09", "octobre": "10", "novembre": "11", "décembre": "12"}
+ liste = ["categories", "tags"]
+ elements = {}
+ element = {}
+ listelement = {}
+
+ for i in liste:
+ page = requests.get("http://{0}/wp-json/wp/v2/{1}".format(self._wordpress,i))
+ if page.status_code == 200:
+ elements[i] = page.json()
+ element[i] = []
+ listelement[i] = []
+
+ articletitle = soup.find_all("h2", class_="articletitle")
+ articlebody = soup.find_all("div", class_="articlebody")
+ articledate = soup.find_all("span", class_="articledate")
+ articleacreator = soup.find_all("span", class_="articlecreator")
+ dateheader = soup.find_all("div", class_="dateheader")
+ itemfooter = soup.find_all("div", class_="itemfooter")
+ comment = soup.find_all("div", class_="comment_item")
+ img_a = articlebody[0].find_all("a", {"target": "_blank"})
+ list_img = []
+ for i in img_a:
+ new_img = {}
+ img = i.find_all("img")
+ if len(img) > 0:
+ href_a = i.get("href")
+ href_img = img[0].get("src")
+ new_img["old_src"]=href_img
+ new_img["old_href"]=href_a
+ page_img = requests.get(href_img)
+ if page_img.status_code == 404:
+ href_img = href_a
+ page_img = requests.get(href_a)
+ if page_img.status_code == 200:
+ media=self._addOrUpdateMedia(href_img, page_img)
+ new_img["id"]=media["id"]
+ new_img["new_src"]=media["rendered"]
+ list_img.append(new_img)
+ if href_img != href_a:
+ media=self._addOrUpdateMedia(href_a, page_img)
+ new_img["id"]=media["id"]
+ new_img["new_src"]=media["rendered"]
+ list_img.append(new_img)
+
+ comment_post = []
+ for i in comment:
+ comment_item = i.text.split("\n")
+ footer = i.find_all("div", class_="itemfooter")
+ comment_author = footer[0].text.split(",")[0].replace("Posté par ", "")
+ comment_date = footer[0].find_all("abbr")[0].get("title")
+ comment_content = ""
+ for j in range(0, len(comment_item)-2):
+ if len(comment_item[j]) > 0:
+                    comment_content = comment_content + comment_item[j] + "<br />"
+            comment_content = comment_content + "<br />"
+ comment_post.append({"author": comment_author, "date": comment_date, "content": comment_content})
+ a = itemfooter[0].find_all("a", {"rel": True})
+ for i in a:
+ rel = i.get("rel")
+ if rel[0] == 'tag':
+ href = i.get("href")
+ if re.search(r'/tag/', href):
+ element["tags"].append(i.text)
+ if re.search(r'/archives/', href):
+ element["categories"].append(i.text)
+ for i in liste:
+ for j in element[i]:
+ element_exist = False
+ for k in elements[i]:
+ if k["name"] == j:
+ element_exist = True
+ listelement[i].append(k["id"])
+ if element_exist is False:
+ data = {"name": j}
+ page = requests.post("http://{0}/wp-json/wp/v2/{1}".format(self._wordpress, i), auth=self._basic, data=data)
+ if page.status_code == 201:
+ result = page.json()
+ listelement[i].append(result["id"])
+
+ title = articletitle[0].text
+ author = articleacreator[0].text.lower()
+ body = articlebody[0].find_all("p")
+ bodyhtml = ""
+ for i in body:
+ if len(i.text) == 1:
+                bodyhtml = bodyhtml + "<br />"
+            else:
+                bodyhtml = bodyhtml + str(i).replace("<p>", "").replace("</p>", "").replace("<br>", "<br />") + "<br />"
+ bodyhtml = bodyhtml + ""
+ for i in list_img:
+ o = urlparse(i["new_src"])
+ bodyhtml = bodyhtml.replace(i["old_href"], o.path)
+ bodyhtml = bodyhtml.replace(i["old_src"], o.path)
+ hour = articledate[0].text
+ time = dateheader[0].text.split(" ")
+ data = {"title":title, "content":bodyhtml, "status":"publish", "date": "{0}-{1}-{2}T{3}:00".format(time[2],month[time[1]],time[0], hour), "tags": listelement["tags"], "categories": listelement["categories"]}
+ params = {"search":author}
+ page = requests.get("http://{0}/wp-json/wp/v2/users".format(self._wordpress), auth=self._basic, params=params)
+ if page.status_code == 200:
+ result = page.json()
+ data["author"] = result[0]["id"]
+
+ params = {"search":title}
+ page = requests.get("http://{0}/wp-json/wp/v2/posts".format(self._wordpress), auth=self._basic, params=params)
+ page_exist = True
+ headers = {'Content-Type': 'application/json', 'Accept':'application/json'}
+ if page.status_code == 200:
+ result = page.json()
+ if len(result) == 0:
+ page_exist = False
+ else:
+ logger.info("La page {0} existe deja et mis à jour".format(title))
+ post_id = result[0]["id"]
+ page = requests.post("http://{0}/wp-json/wp/v2/posts/{1}".format(self._wordpress, post_id), auth=self._basic, headers=headers, data=json.dumps(data))
+ if page.status_code == 200:
+ result = page.json()
+ logger.info("Article mis à jour : {0}".format(result["title"]["raw"]))
+ self._linkImgPost(result["title"]["raw"], list_img, result["id"])
+
+
+
+ if page_exist == False:
+ page = requests.post("http://{0}/wp-json/wp/v2/posts".format(self._wordpress), auth=self._basic, headers=headers, data=json.dumps(data))
+ if page.status_code == 201:
+ result = page.json()
+ logger.info("Article ajoute : {0}".format(result["title"]["raw"]))
+ for i in comment_post:
+ data = {"post": result["id"], "content": i["content"], "date": i["date"], "author_name": i["author"]}
+ page = requests.post("http://{0}/wp-json/wp/v2/comments".format(self._wordpress), auth=self._basic, data=data)
+ if page.status_code == 201:
+ logger.info("Commentaire ajoute pour {0}".format(result["title"]["raw"]))
+ self._linkImgPost(result["title"]["raw"], list_img, result["id"])
+
+
+
+
diff --git a/insert_wordpress.py b/insert_wordpress.py
index 2fc36fa..68d2244 100644
--- a/insert_wordpress.py
+++ b/insert_wordpress.py
@@ -1,272 +1,8 @@
#!/usr/bin/python3
-from bs4 import BeautifulSoup
-from urllib.parse import urlparse
from requests.auth import HTTPBasicAuth
from getpass import getpass
-import requests, os, argparse, logging, re, json
-
-class WPimport:
- # Constructor
- def __init__(self, basic, wordpress, logger):
- self._basic = basic
- self._wordpress = wordpress
- self._logger = logger
-
- # Public method
-
- def fromDirectory(self, directory):
- directory = "{0}/archives".format(directory)
- directories = self._getDirectories([], "{0}".format(directory))
- files = self._getFiles(directories)
- self.fromFile(files)
-
-
- def fromFile(self, files):
- for file in files:
- if os.path.exists(file):
- logger.info("Fichier en cours de traitement : {0}".format(file))
- with open(file, 'r') as f:
- content = f.read()
- soup = BeautifulSoup(content, 'html.parser')
- articlebody = soup.find_all("div", class_="articlebody")
- if len(articlebody) > 0:
- self._addOrUpdatePost(soup)
- else:
- self._addOrUpdateFeaturedMedia(soup)
-
- # Private method
-
- ## Get all files
-
- def _getFiles(self, item):
- files = []
- for i in item:
- for j in os.listdir(i):
- if os.path.isfile("{0}/{1}".format(i, j)):
- files.append("{0}/{1}".format(i, j))
- return files
-
- ## Get directories
-
- def _getDirectories(self, subdirectory, item):
- sub = subdirectory
- for i in os.listdir(item):
- if os.path.isdir("{0}/{1}".format(item, i)):
- sub.append("{0}/{1}".format(item, i))
- subdirectory = self._getDirectories(sub, "{0}/{1}".format(item, i))
- return subdirectory
-
- ## Add or update featured media
-
- def _addOrUpdateFeaturedMedia(self, soup):
- item_div = soup.find_all("div", {"data-edittype": "post"})
- for i in item_div:
- h2 = i.find_all("h2")[0].text
- params = {"search":h2, "type":"post"}
- page = requests.get("http://{0}/wp-json/wp/v2/search".format(self._wordpress), auth=self._basic, params=params)
- if page.status_code == 200:
- result = page.json()
- if len(result) > 0:
- if h2 == result[0]["title"]:
- img = i.find_all("img")
- if len(img) > 0:
- img_src = img[0].get("src")
- page = requests.get(img_src)
- if page.status_code == 200:
- name_img = img_src.replace("_q", "")
- name_img = name_img.split("/")[len(name_img.split("/"))-1]
- params = {"search": name_img}
- page = requests.get("http://{0}/wp-json/wp/v2/media".format(self._wordpress), auth=self._basic, params=params)
- if page.status_code == 200:
- res = page.json()
- if len(res) > 0:
- id_media = res[0]["id"]
- headers = {'Content-Type': 'application/json', 'Accept':'application/json'}
- data = {"featured_media": id_media}
- r = requests.post("http://{0}/wp-json/wp/v2/posts/{1}".format(self._wordpress, result[0]["id"]), auth=self._basic, headers=headers, data=json.dumps(data))
- if r.status_code == 200:
- logger.info("Ajout media featured : {0}".format(r.json()["title"]["raw"]))
- else:
- logger.info("Aucun media trouvé pour {0}".format(h2))
-
- ## Association image to post
-
- def _linkImgPost(self, title, list_img, post_id):
- for i in list_img:
- data = {"post": post_id}
- r = requests.post("http://{0}/wp-json/wp/v2/media/{1}".format(self._wordpress, i["id"]), auth=self._basic, data=data)
- if r.status_code == 200:
- logger.info("Association d'une image à l'article {0}".format(title))
-
- ## Add or update img
-
- def _addOrUpdateMedia(self, href_img, page):
- media = {"id":"", "rendered":""}
- split_fileimg = href_img.split("/")
- img_name = split_fileimg[len(split_fileimg)-1]
- params = { "search": img_name}
- r = requests.get("http://{0}/wp-json/wp/v2/media".format(self._wordpress), auth=self._basic, params=params)
- if r.status_code == 200:
- res = r.json()
- if len(res) > 0:
- params = {"force":1}
- r = requests.delete("http://{0}/wp-json/wp/v2/media/{1}".format(self._wordpress, res[0]["id"]), auth=self._basic, params=params)
- if r.status_code == 200:
- logger.info("Image supprimé {0}".format(img_name))
- data = page.content
- img_type = "image/png"
- if img_name.split(".")[1] == "jpg" or img_name.split(".")[1] == "jpeg":
- img_type = "image/jpg"
- headers={ 'Content-Type': img_type,'Content-Disposition' : 'attachment; filename={0}'.format(img_name)}
- r = requests.post("http://{0}/wp-json/wp/v2/media".format(self._wordpress), auth=self._basic, headers=headers, data=data)
- if r.status_code == 201:
- logger.info("Ajout d'image {0}".format(img_name))
- res = r.json()
- media["id"] = res["id"]
- media["rendered"] = res["guid"]["rendered"]
- return media
-
- ## Add or Update post
-
- def _addOrUpdatePost(self, soup):
- tags = []
- month = {"janvier":"01", "février": "02", "mars": "03", "avril":"04", "mai": "05", "juin": "06", "juillet": "07", "août": "08", "septembre": "09", "octobre": "10", "novembre": "11", "décembre": "12"}
- liste = ["categories", "tags"]
- elements = {}
- element = {}
- listelement = {}
-
- for i in liste:
- page = requests.get("http://{0}/wp-json/wp/v2/{1}".format(self._wordpress,i))
- if page.status_code == 200:
- elements[i] = page.json()
- element[i] = []
- listelement[i] = []
-
- articletitle = soup.find_all("h2", class_="articletitle")
- articlebody = soup.find_all("div", class_="articlebody")
- articledate = soup.find_all("span", class_="articledate")
- articleacreator = soup.find_all("span", class_="articlecreator")
- dateheader = soup.find_all("div", class_="dateheader")
- itemfooter = soup.find_all("div", class_="itemfooter")
- comment = soup.find_all("div", class_="comment_item")
- img_a = articlebody[0].find_all("a", {"target": "_blank"})
- list_img = []
- for i in img_a:
- new_img = {}
- img = i.find_all("img")
- if len(img) > 0:
- href_a = i.get("href")
- href_img = img[0].get("src")
- new_img["old_src"]=href_img
- new_img["old_href"]=href_a
- page_img = requests.get(href_img)
- if page_img.status_code == 404:
- href_img = href_a
- page_img = requests.get(href_a)
- if page_img.status_code == 200:
- media=self._addOrUpdateMedia(href_img, page_img)
- new_img["id"]=media["id"]
- new_img["new_src"]=media["rendered"]
- list_img.append(new_img)
- if href_img != href_a:
- media=self._addOrUpdateMedia(href_a, page_img)
- new_img["id"]=media["id"]
- new_img["new_src"]=media["rendered"]
- list_img.append(new_img)
-
- comment_post = []
- for i in comment:
- comment_item = i.text.split("\n")
- footer = i.find_all("div", class_="itemfooter")
- comment_author = footer[0].text.split(",")[0].replace("Posté par ", "")
- comment_date = footer[0].find_all("abbr")[0].get("title")
- comment_content = ""
- for j in range(0, len(comment_item)-2):
- if len(comment_item[j]) > 0:
-                    comment_content = comment_content + comment_item[j] + "<br />"
-            comment_content = comment_content + "<br />"
- comment_post.append({"author": comment_author, "date": comment_date, "content": comment_content})
- a = itemfooter[0].find_all("a", {"rel": True})
- for i in a:
- rel = i.get("rel")
- if rel[0] == 'tag':
- href = i.get("href")
- if re.search(r'/tag/', href):
- element["tags"].append(i.text)
- if re.search(r'/archives/', href):
- element["categories"].append(i.text)
- for i in liste:
- for j in element[i]:
- element_exist = False
- for k in elements[i]:
- if k["name"] == j:
- element_exist = True
- listelement[i].append(k["id"])
- if element_exist is False:
- data = {"name": j}
- page = requests.post("http://{0}/wp-json/wp/v2/{1}".format(self._wordpress, i), auth=self._basic, data=data)
- if page.status_code == 201:
- result = page.json()
- listelement[i].append(result["id"])
-
- title = articletitle[0].text
- author = articleacreator[0].text.lower()
- body = articlebody[0].find_all("p")
- bodyhtml = ""
- for i in body:
- if len(i.text) == 1:
-                bodyhtml = bodyhtml + "<br />"
-            else:
-                bodyhtml = bodyhtml + str(i).replace("<p>", "").replace("</p>", "").replace("<br>", "<br />") + "<br />"
- bodyhtml = bodyhtml + ""
- for i in list_img:
- o = urlparse(i["new_src"])
- bodyhtml = bodyhtml.replace(i["old_href"], o.path)
- bodyhtml = bodyhtml.replace(i["old_src"], o.path)
- hour = articledate[0].text
- time = dateheader[0].text.split(" ")
- data = {"title":title, "content":bodyhtml, "status":"publish", "date": "{0}-{1}-{2}T{3}:00".format(time[2],month[time[1]],time[0], hour), "tags": listelement["tags"], "categories": listelement["categories"]}
- params = {"search":author}
- page = requests.get("http://{0}/wp-json/wp/v2/users".format(self._wordpress), auth=self._basic, params=params)
- if page.status_code == 200:
- result = page.json()
- data["author"] = result[0]["id"]
-
- params = {"search":title}
- page = requests.get("http://{0}/wp-json/wp/v2/posts".format(self._wordpress), auth=self._basic, params=params)
- page_exist = True
- headers = {'Content-Type': 'application/json', 'Accept':'application/json'}
- if page.status_code == 200:
- result = page.json()
- if len(result) == 0:
- page_exist = False
- else:
- logger.info("La page {0} existe deja et mis à jour".format(title))
- post_id = result[0]["id"]
- page = requests.post("http://{0}/wp-json/wp/v2/posts/{1}".format(self._wordpress, post_id), auth=self._basic, headers=headers, data=json.dumps(data))
- if page.status_code == 200:
- result = page.json()
- logger.info("Article mis à jour : {0}".format(result["title"]["raw"]))
- self._linkImgPost(result["title"]["raw"], list_img, result["id"])
-
-
-
- if page_exist == False:
- page = requests.post("http://{0}/wp-json/wp/v2/posts".format(self._wordpress), auth=self._basic, headers=headers, data=json.dumps(data))
- if page.status_code == 201:
- result = page.json()
- logger.info("Article ajoute : {0}".format(result["title"]["raw"]))
- for i in comment_post:
- data = {"post": result["id"], "content": i["content"], "date": i["date"], "author_name": i["author"]}
- page = requests.post("http://{0}/wp-json/wp/v2/comments".format(self._wordpress), auth=self._basic, data=data)
- if page.status_code == 201:
- logger.info("Commentaire ajoute pour {0}".format(result["title"]["raw"]))
- self._linkImgPost(result["title"]["raw"], list_img, result["id"])
-
-
-
-
+import argparse, logging
+import WPImport
if __name__ == '__main__':
From 481fc40929357d30dc8eb5b9ba9768189428ba7f Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Sat, 8 Apr 2023 21:27:35 +0200
Subject: [PATCH 39/60] separate class file for WPimport
---
WPImport.py | 20 ++++++++++----------
insert_wordpress.py | 2 +-
2 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/WPImport.py b/WPImport.py
index 9b4691c..e3164f1 100644
--- a/WPImport.py
+++ b/WPImport.py
@@ -23,7 +23,7 @@ class WPimport:
def fromFile(self, files):
for file in files:
if os.path.exists(file):
- logger.info("Fichier en cours de traitement : {0}".format(file))
+ self._logger.info("Fichier en cours de traitement : {0}".format(file))
with open(file, 'r') as f:
content = f.read()
soup = BeautifulSoup(content, 'html.parser')
@@ -84,9 +84,9 @@ class WPimport:
data = {"featured_media": id_media}
r = requests.post("http://{0}/wp-json/wp/v2/posts/{1}".format(self._wordpress, result[0]["id"]), auth=self._basic, headers=headers, data=json.dumps(data))
if r.status_code == 200:
- logger.info("Ajout media featured : {0}".format(r.json()["title"]["raw"]))
+ self._logger.info("Ajout media featured : {0}".format(r.json()["title"]["raw"]))
else:
- logger.info("Aucun media trouvé pour {0}".format(h2))
+ self._logger.info("Aucun media trouvé pour {0}".format(h2))
## Association image to post
@@ -95,7 +95,7 @@ class WPimport:
data = {"post": post_id}
r = requests.post("http://{0}/wp-json/wp/v2/media/{1}".format(self._wordpress, i["id"]), auth=self._basic, data=data)
if r.status_code == 200:
- logger.info("Association d'une image à l'article {0}".format(title))
+ self._logger.info("Association d'une image à l'article {0}".format(title))
## Add or update img
@@ -111,7 +111,7 @@ class WPimport:
params = {"force":1}
r = requests.delete("http://{0}/wp-json/wp/v2/media/{1}".format(self._wordpress, res[0]["id"]), auth=self._basic, params=params)
if r.status_code == 200:
- logger.info("Image supprimé {0}".format(img_name))
+ self._logger.info("Image supprimé {0}".format(img_name))
data = page.content
img_type = "image/png"
if img_name.split(".")[1] == "jpg" or img_name.split(".")[1] == "jpeg":
@@ -119,7 +119,7 @@ class WPimport:
headers={ 'Content-Type': img_type,'Content-Disposition' : 'attachment; filename={0}'.format(img_name)}
r = requests.post("http://{0}/wp-json/wp/v2/media".format(self._wordpress), auth=self._basic, headers=headers, data=data)
if r.status_code == 201:
- logger.info("Ajout d'image {0}".format(img_name))
+ self._logger.info("Ajout d'image {0}".format(img_name))
res = r.json()
media["id"] = res["id"]
media["rendered"] = res["guid"]["rendered"]
@@ -241,12 +241,12 @@ class WPimport:
if len(result) == 0:
page_exist = False
else:
- logger.info("La page {0} existe deja et mis à jour".format(title))
+ self._logger.info("La page {0} existe deja et mis à jour".format(title))
post_id = result[0]["id"]
page = requests.post("http://{0}/wp-json/wp/v2/posts/{1}".format(self._wordpress, post_id), auth=self._basic, headers=headers, data=json.dumps(data))
if page.status_code == 200:
result = page.json()
- logger.info("Article mis à jour : {0}".format(result["title"]["raw"]))
+ self._logger.info("Article mis à jour : {0}".format(result["title"]["raw"]))
self._linkImgPost(result["title"]["raw"], list_img, result["id"])
@@ -255,12 +255,12 @@ class WPimport:
page = requests.post("http://{0}/wp-json/wp/v2/posts".format(self._wordpress), auth=self._basic, headers=headers, data=json.dumps(data))
if page.status_code == 201:
result = page.json()
- logger.info("Article ajoute : {0}".format(result["title"]["raw"]))
+ self._logger.info("Article ajoute : {0}".format(result["title"]["raw"]))
for i in comment_post:
data = {"post": result["id"], "content": i["content"], "date": i["date"], "author_name": i["author"]}
page = requests.post("http://{0}/wp-json/wp/v2/comments".format(self._wordpress), auth=self._basic, data=data)
if page.status_code == 201:
- logger.info("Commentaire ajoute pour {0}".format(result["title"]["raw"]))
+ self._logger.info("Commentaire ajoute pour {0}".format(result["title"]["raw"]))
self._linkImgPost(result["title"]["raw"], list_img, result["id"])
diff --git a/insert_wordpress.py b/insert_wordpress.py
index 68d2244..4c828cc 100644
--- a/insert_wordpress.py
+++ b/insert_wordpress.py
@@ -46,7 +46,7 @@ if __name__ == '__main__':
exit(1)
basic = HTTPBasicAuth(args.user, password)
- importWp = WPimport(basic, args.wordpress, logger)
+ importWp = WPImport.WPimport(basic, args.wordpress, logger)
if len(args.file) > 0:
importWp.fromFile(args.file.split(","))
exit(0)
From 2289066dd5360006cebc0de71c25f8a31c4ea43b Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Sat, 8 Apr 2023 21:44:52 +0200
Subject: [PATCH 40/60] rename main file
---
insert_wordpress.py => import_export_canalblog.py | 0
1 file changed, 0 insertions(+), 0 deletions(-)
rename insert_wordpress.py => import_export_canalblog.py (100%)
diff --git a/insert_wordpress.py b/import_export_canalblog.py
similarity index 100%
rename from insert_wordpress.py
rename to import_export_canalblog.py
From 8384dcb2b618532b9703be0c12bbc0553fa7d96a Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Sat, 8 Apr 2023 22:14:20 +0200
Subject: [PATCH 41/60] create class WPExport
---
WPExport.py | 242 ++++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 242 insertions(+)
create mode 100644 WPExport.py
diff --git a/WPExport.py b/WPExport.py
new file mode 100644
index 0000000..87571d7
--- /dev/null
+++ b/WPExport.py
@@ -0,0 +1,242 @@
+#!/usr/bin/python3
+from bs4 import BeautifulSoup
+from urllib.parse import urlparse
+import requests, os, argparse, logging
+
class WPExport:
    """Export a Canalblog site: crawl its pages and download HTML, CSS, JS and images.

    All network/filesystem errors are logged and abort the process with exit(1),
    matching the script's original best-effort-then-die behaviour.
    """

    def __init__(self, url, logger):
        # Base URL of the blog to export and the shared logger instance.
        self._url = url
        self._logger = logger

    def _mkdirPath(self, path_dir, logger=None):
        """Create ``path_dir`` and every missing intermediate directory.

        ``logger`` is accepted for backward compatibility with the old required
        parameter but is unused; messages go through ``self._logger``.  The old
        signature made ``_downloadPage``'s one-argument call a TypeError.
        """
        if os.path.exists(path_dir):
            return
        makedir = []
        pathh = path_dir.split("/")
        for part in pathh:
            # Rebuild the path one component at a time so each level gets created.
            makedir.append(part)
            repath = "/".join(makedir)
            if not os.path.exists(repath):
                self._logger.debug("Dossier crée : {0}".format(repath))
                try:
                    # repath is "" for a leading "/" component — nothing to create.
                    if len(repath) > 0:
                        os.mkdir(repath)
                except Exception as err:
                    self._logger.error("Directory error : {0}".format(err))
                    self._logger.debug("Directory error : {0} {1} {2} {3} {4}".format(err, path_dir, repath, pathh, makedir))
                    exit(1)

    def _absolute(self, link):
        """Resolve ``link`` against ``self._url``: fill in netloc/scheme when relative."""
        try:
            u = urlparse(self._url)
            o = urlparse(link)
        except Exception as err:
            self._logger.error("parsing error : {0}".format(err))
            exit(1)
        if o.netloc == "":
            o = o._replace(netloc=u.netloc)
            o = o._replace(scheme=u.scheme)
        return o.geturl()

    def _getScriptCss(self, js, css, logger=None):
        """Return the URLs of the page's <script> sources (``js``) and stylesheet
        <link> hrefs (``css``).

        Fix: fetch ``self._url`` instead of the module-global ``url`` the original
        relied on by accident.  ``logger`` kept optional for backward compatibility.
        """
        try:
            page = requests.get(self._url)
        except Exception as err:
            self._logger.error("Connection error : {0}".format(err))
            exit(1)
        page_url = []
        if page.status_code == 200:
            soup = BeautifulSoup(page.text, 'html.parser')
            if js is True:
                for anchor in soup.find_all("script"):
                    src = anchor.get("src", "/")
                    if src != "/":
                        page_url.append(self._absolute(src))
            if css is True:
                for anchor in soup.find_all("link"):
                    rel = anchor.get("rel")
                    # rel may be absent on e.g. <link href=...> — guard before indexing.
                    if rel and rel[0] == "stylesheet":
                        href = anchor.get("href", "/")
                        if href != "/":
                            page_url.append(self._absolute(href))
        return page_url

    def _getImg(self, webpage):
        """Return the de-duplicated ``src`` URLs of every <img> on the given pages."""
        page_img = []
        for page_link in webpage:
            try:
                page = requests.get(page_link)
            except Exception as err:
                self._logger.error("Connection error : {0}".format(err))
                exit(1)
            if page.status_code == 200:
                soup = BeautifulSoup(page.text, 'html.parser')
                self._logger.info("image from page: {0} : ".format(page_link))
                for anchor in soup.find_all("img"):
                    src = anchor.get("src", "/")
                    if src != "/" and src not in page_img:
                        self._logger.info("image: {0} : ".format(src))
                        page_img.append(src)
        return page_img

    def _getUrlPage(self):
        """Crawl the blog's category menu and archive paging to collect every
        article URL.

        Fix: paging URLs are built from ``self._url`` instead of the accidental
        module-global ``url``.
        """
        try:
            page = requests.get(self._url)
        except Exception as err:
            self._logger.error("Connection error : {0}".format(err))
            exit(1)
        # Category links from the "listsmooth" navigation menu.
        page_url = []
        if page.status_code == 200:
            soup = BeautifulSoup(page.text, 'html.parser')
            ul = soup.find_all("ul", id="listsmooth")
            for anchor in ul[0].find_all("a"):
                href = anchor.get('href', '/')
                if href != "#":
                    page_url.append(href)

        webpage = []
        for i in page_url:
            try:
                page = requests.get(i)
            except Exception as err:
                self._logger.error("Connection error : {0}".format(err))
                exit(1)
            if page.status_code == 200:
                self._logger.info("page : {0}".format(i))
                if i not in webpage:
                    webpage.append(i)
                soup = BeautifulSoup(page.text, 'html.parser')
                class_div = soup.find_all("div", class_="pagingfirstline")
                if len(class_div) > 0:
                    pagingfirstline = class_div[0].find_all("a")
                    if len(pagingfirstline) > 1:
                        # Last paging anchor looks like .../p<NN>-10.html; NN/10 pages exist.
                        lastpage = pagingfirstline[len(pagingfirstline) - 1].get("href", "/")
                        element_lastpage = lastpage.split("/")[len(lastpage.split("/")) - 1]
                        number_page = element_lastpage.split("-")[0].split("p")[1]
                        number_lastpage = int(number_page) / 10
                        for j in range(1, int(number_lastpage)):
                            paging = j * 10
                            categorie = urlparse(i).path.split("/")
                            url_paging = "{0}/archives/p{1}-10.html".format(self._url, paging)
                            if len(categorie) > 2:
                                url_paging = "{0}/archives/{1}/p{2}-10.html".format(self._url, categorie[2], paging)
                            self._logger.info(url_paging)
                            if url_paging not in webpage:
                                webpage.append(url_paging)
                            page = requests.get(url_paging)
                            if page.status_code == 200:
                                soup = BeautifulSoup(page.text, 'html.parser')
                                for title in soup.find_all("h2"):
                                    href = title.find_all("a")[0].get("href", "/")
                                    if href not in webpage:
                                        try:
                                            o = urlparse(href)
                                            o = o._replace(scheme="https").geturl()
                                        except Exception as err:
                                            self._logger.error("parsing error : {0}".format(err))
                                            exit(1)
                                        webpage.append(o)
        return webpage

    def _downloadPage(self, webpage, backup_dir):
        """Download every URL in ``webpage`` under ``backup_dir``, mirroring the
        remote directory layout (``backup_dir/<netloc>/<path>``)."""
        for i in range(0, len(webpage)):
            try:
                o = urlparse(webpage[i])
            except Exception as err:
                self._logger.error("parsing error : {0}".format(err))
                exit(1)
            path_web = o.path.split("/")
            filePageWeb = path_web[len(path_web) - 1]
            path_web.pop(len(path_web) - 1)
            dir_page_web = "/".join(path_web)
            self._mkdirPath("{0}/{1}/{2}".format(backup_dir, o.netloc, dir_page_web))
            try:
                r = requests.get(webpage[i])
            except Exception as err:
                self._logger.error("Connection error : {0}".format(err))
                exit(1)
            if r.status_code == 200:
                # A URL with an empty path is saved as the site's index.html.
                fileDownload = "{0}/{1}/index.html".format(backup_dir, o.netloc)
                if len(dir_page_web) > 0 and len(filePageWeb) > 0:
                    fileDownload = "{0}/{1}{2}/{3}".format(backup_dir, o.netloc, dir_page_web, filePageWeb)
                self._logger.info("{0}/{1} : {2}".format(i + 1, len(webpage), fileDownload))
                try:
                    # Fix: use a context manager so the handle is always closed.
                    with open(fileDownload, "wb") as f:
                        f.write(r.content)
                except Exception as err:
                    self._logger.error("file error : {0}".format(err))
                    exit(1)
+
+
if __name__ == '__main__':
    # CLI entry point: scrape a Canalblog site into a local backup directory.
    parser = argparse.ArgumentParser()
    parser.add_argument("--url", help="canblog URL to be scraping", required=True)
    parser.add_argument("--dir",
        default="backup",
        help="backup file path")
    parser.add_argument("--debug", help="Verbosity", action="store_true")
    parser.add_argument("--logfile", help="Log file", default="")
    parser.add_argument("--no-css", help="No CSS", dest="css", action="store_true")
    parser.add_argument("--no-js", help="No JS", dest="js", action="store_true")
    parser.add_argument("--no-img", help="No img", dest="img", action="store_true")
    parser.add_argument("--no-html", help="No HTML", dest="html", action="store_true")
    parser.add_argument("--quiet", help="No console output", action="store_true")
    args = parser.parse_args()

    # Console and optional file logging share one formatter.
    logger = logging.getLogger('web_scrap')
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    if args.quiet is False:
        ch = logging.StreamHandler()
        if args.debug is True:
            logger.setLevel(logging.DEBUG)
            ch.setLevel(logging.DEBUG)
        else:
            logger.setLevel(logging.INFO)
            ch.setLevel(logging.INFO)
        ch.setFormatter(formatter)
        logger.addHandler(ch)

    if len(args.logfile) > 0:
        fileHandler = logging.FileHandler(args.logfile)
        if args.debug is True:
            fileHandler.setLevel(logging.DEBUG)
        else:
            fileHandler.setLevel(logging.INFO)
        fileHandler.setFormatter(formatter)
        logger.addHandler(fileHandler)

    # Normalize the URL to https; ":///" appears when the input had no scheme.
    try:
        o = urlparse(args.url)
        o = o._replace(scheme="https")
        url = o.geturl().replace(":///", "://")
    except Exception as err:
        logger.error("parsing error : {0}".format(err))
        # Fix: the original fell through here with `url`/`o` undefined.
        exit(1)

    # Fix: the original called undefined free functions (getScriptCss,
    # downloadPage, getUrlPage, getImg); route everything through WPExport.
    export = WPExport(url, logger)

    if args.js is False:
        script = export._getScriptCss(True, False, logger)
        export._downloadPage(script, "{0}/{1}/{2}".format(args.dir, o.path, "dists/js"))

    if args.css is False:
        css = export._getScriptCss(False, True, logger)
        export._downloadPage(css, "{0}/{1}/{2}".format(args.dir, o.path, "dists/css"))

    if args.html is False or args.img is False:
        webpage = export._getUrlPage()
        if args.html is False:
            export._downloadPage(webpage, args.dir)

        if args.img is False:
            page_src = export._getImg(webpage)
            export._downloadPage(page_src, "{0}/{1}/{2}".format(args.dir, o.path, "img"))
From b3f623cbd5ea66560165ad1c4f9fab691b0174e5 Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Sat, 8 Apr 2023 23:20:52 +0200
Subject: [PATCH 42/60] subparser import
---
import_export_canalblog.py | 13 +++++++++----
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/import_export_canalblog.py b/import_export_canalblog.py
index 4c828cc..ed55576 100644
--- a/import_export_canalblog.py
+++ b/import_export_canalblog.py
@@ -7,14 +7,19 @@ import WPImport
if __name__ == '__main__':
parser = argparse.ArgumentParser()
- parser.add_argument("--user", help="wordpress user", required=True)
- parser.add_argument("--file", help="HTML file", default="")
- parser.add_argument("--directory", help="HTML directory", default="")
- parser.add_argument("--wordpress", help="URL Wordpress", required=True)
parser.add_argument("--debug", help="Verbosity", action="store_true")
parser.add_argument("--logfile", help="Log file", default="")
parser.add_argument("--quiet", help="No console output", action="store_true")
+ subparsers = parser.add_subparsers()
+
+ import_parser = subparsers.add_parser("import")
+ import_parser.add_argument("--user", help="wordpress user", required=True)
+ import_parser.add_argument("--file", help="HTML file", default="")
+ import_parser.add_argument("--directory", help="HTML directory", default="")
+ import_parser.add_argument("--wordpress", help="URL Wordpress", required=True)
+
+
args = parser.parse_args()
logger = logging.getLogger('insert wordpress')
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
From ebc6206ec9c6b430461b9c10df2c1c0cbdcfeccc Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Sat, 8 Apr 2023 23:34:56 +0200
Subject: [PATCH 43/60] add subparser export
---
import_export_canalblog.py | 12 ++++++++++++
1 file changed, 12 insertions(+)
diff --git a/import_export_canalblog.py b/import_export_canalblog.py
index ed55576..f1e07df 100644
--- a/import_export_canalblog.py
+++ b/import_export_canalblog.py
@@ -18,6 +18,18 @@ if __name__ == '__main__':
import_parser.add_argument("--file", help="HTML file", default="")
import_parser.add_argument("--directory", help="HTML directory", default="")
import_parser.add_argument("--wordpress", help="URL Wordpress", required=True)
+
+ export_parser = subparsers.add_parser("export")
+
+ export_parser.add_argument("--url", help="canblog URL to be scraping", required=True)
+ export_parser.add_argument("--directory",
+ default="backup",
+ help="backup file path")
+ export_parser.add_argument("--no-css", help="No CSS", dest="css", action="store_true")
+ export_parser.add_argument("--no-js", help="No JS", dest="js", action="store_true")
+ export_parser.add_argument("--no-img", help="No img", dest="img", action="store_true")
+ export_parser.add_argument("--no-html", help="No HTML", dest="html", action="store_true")
+
args = parser.parse_args()
From 7e484fa308a9020effd8beedfadf123da06615ff Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Sat, 8 Apr 2023 23:43:06 +0200
Subject: [PATCH 44/60] add args command name
---
import_export_canalblog.py | 26 ++++++++++++++------------
1 file changed, 14 insertions(+), 12 deletions(-)
diff --git a/import_export_canalblog.py b/import_export_canalblog.py
index f1e07df..7e5ea30 100644
--- a/import_export_canalblog.py
+++ b/import_export_canalblog.py
@@ -11,7 +11,7 @@ if __name__ == '__main__':
parser.add_argument("--logfile", help="Log file", default="")
parser.add_argument("--quiet", help="No console output", action="store_true")
- subparsers = parser.add_subparsers()
+ subparsers = parser.add_subparsers(dest="command")
import_parser = subparsers.add_parser("import")
import_parser.add_argument("--user", help="wordpress user", required=True)
@@ -33,6 +33,7 @@ if __name__ == '__main__':
args = parser.parse_args()
+
logger = logging.getLogger('insert wordpress')
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
@@ -57,15 +58,16 @@ if __name__ == '__main__':
fileHandler.setFormatter(formatter)
logger.addHandler(fileHandler)
- password = getpass()
- if len(password) == 0:
- logger.error("No password error !!! ")
- exit(1)
+ if args.command == "export":
+ password = getpass()
+ if len(password) == 0:
+ logger.error("No password error !!! ")
+ exit(1)
- basic = HTTPBasicAuth(args.user, password)
- importWp = WPImport.WPimport(basic, args.wordpress, logger)
- if len(args.file) > 0:
- importWp.fromFile(args.file.split(","))
- exit(0)
- if len(args.directory) > 0:
- importWp.fromDirectory(args.directory)
\ No newline at end of file
+ basic = HTTPBasicAuth(args.user, password)
+ importWp = WPImport.WPimport(basic, args.wordpress, logger)
+ if len(args.file) > 0:
+ importWp.fromFile(args.file.split(","))
+ exit(0)
+ if len(args.directory) > 0:
+ importWp.fromDirectory(args.directory)
\ No newline at end of file
From cd6b03b0ffbb592e619ea74485f6d9b68dcd5d8e Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Sun, 9 Apr 2023 21:17:49 +0200
Subject: [PATCH 45/60] add parameter parser
---
WPImport.py | 5 +++--
import_export_canalblog.py | 3 ++-
2 files changed, 5 insertions(+), 3 deletions(-)
diff --git a/WPImport.py b/WPImport.py
index e3164f1..92f6681 100644
--- a/WPImport.py
+++ b/WPImport.py
@@ -6,10 +6,11 @@ import requests, os, logging, re, json
class WPimport:
# Constructor
- def __init__(self, basic, wordpress, logger):
+ def __init__(self, basic, wordpress, logger, parser):
self._basic = basic
self._wordpress = wordpress
self._logger = logger
+ self._parser = parser
# Public method
@@ -26,7 +27,7 @@ class WPimport:
self._logger.info("Fichier en cours de traitement : {0}".format(file))
with open(file, 'r') as f:
content = f.read()
- soup = BeautifulSoup(content, 'html.parser')
+ soup = BeautifulSoup(content, self._parser)
articlebody = soup.find_all("div", class_="articlebody")
if len(articlebody) > 0:
self._addOrUpdatePost(soup)
diff --git a/import_export_canalblog.py b/import_export_canalblog.py
index 7e5ea30..cc19a06 100644
--- a/import_export_canalblog.py
+++ b/import_export_canalblog.py
@@ -10,6 +10,7 @@ if __name__ == '__main__':
parser.add_argument("--debug", help="Verbosity", action="store_true")
parser.add_argument("--logfile", help="Log file", default="")
parser.add_argument("--quiet", help="No console output", action="store_true")
+ parser.add_argument("--parser", help="Parser content", default="html.parser")
subparsers = parser.add_subparsers(dest="command")
@@ -65,7 +66,7 @@ if __name__ == '__main__':
exit(1)
basic = HTTPBasicAuth(args.user, password)
- importWp = WPImport.WPimport(basic, args.wordpress, logger)
+ importWp = WPImport.WPimport(basic, args.wordpress, logger, args.parser)
if len(args.file) > 0:
importWp.fromFile(args.file.split(","))
exit(0)
From 9ed08ea964f2725812236f3897f2d9c5c976e2b7 Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Sun, 9 Apr 2023 21:45:51 +0200
Subject: [PATCH 46/60] add parameter parser
---
WPExport.py | 13 +++++++------
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/WPExport.py b/WPExport.py
index 87571d7..6fd0b8a 100644
--- a/WPExport.py
+++ b/WPExport.py
@@ -4,9 +4,10 @@ from urllib.parse import urlparse
import requests, os, argparse, logging
class WPExport:
- def __init__(self, url, logger):
+ def __init__(self, url, logger, parser):
self._url = url
self._logger = logger
+ self._parser = parser
def _mkdirPath(self, path_dir, logger):
if not os.path.exists(path_dir):
@@ -34,7 +35,7 @@ class WPExport:
exit(1)
page_url = []
if page.status_code == 200:
- soup = BeautifulSoup(page.text, 'html.parser')
+ soup = BeautifulSoup(page.text, self._parser)
if js is True:
script = soup.find_all("script")
for anchor in script:
@@ -78,7 +79,7 @@ class WPExport:
self._logger.error("Connection error : {0}".format(err))
exit(1)
if page.status_code == 200:
- soup = BeautifulSoup(page.text, 'html.parser')
+ soup = BeautifulSoup(page.text, self._parser)
img = soup.find_all("img")
self._logger.info("image from page: {0} : ".format(i))
for anchor in img:
@@ -97,7 +98,7 @@ class WPExport:
exit(1)
page_url = []
if page.status_code == 200:
- soup = BeautifulSoup(page.text, 'html.parser')
+ soup = BeautifulSoup(page.text, self._parser)
ul = soup.find_all("ul", id="listsmooth")
for anchor in ul[0].find_all("a"):
href = anchor.get('href', '/')
@@ -115,7 +116,7 @@ class WPExport:
self._logger.info("page : {0}".format(i))
if i not in webpage:
webpage.append(i)
- soup = BeautifulSoup(page.text, 'html.parser')
+ soup = BeautifulSoup(page.text, self._parser)
class_div = pagingfirstline = soup.find_all("div", class_="pagingfirstline")
if len(class_div) > 0:
pagingfirstline = class_div[0].find_all("a")
@@ -135,7 +136,7 @@ class WPExport:
webpage.append(url_paging)
page = requests.get(url_paging)
if page.status_code == 200:
- soup = BeautifulSoup(page.text, 'html.parser')
+ soup = BeautifulSoup(page.text, self._parser)
h2 = soup.find_all("h2")
for title in h2:
href = title.find_all("a")[0].get("href", "/")
From bba6cd1ca7561f3a8c8be70ce1aee6fd6eb4b2b0 Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Sun, 9 Apr 2023 22:49:44 +0200
Subject: [PATCH 47/60] add export canalblog
---
WPExport.py | 264 +++++++++++++++++--------------------
import_export_canalblog.py | 28 +++-
2 files changed, 147 insertions(+), 145 deletions(-)
diff --git a/WPExport.py b/WPExport.py
index 6fd0b8a..6db20ad 100644
--- a/WPExport.py
+++ b/WPExport.py
@@ -4,93 +4,44 @@ from urllib.parse import urlparse
import requests, os, argparse, logging
class WPExport:
- def __init__(self, url, logger, parser):
+ def __init__(self, url, logger, parser, directory):
self._url = url
self._logger = logger
self._parser = parser
-
- def _mkdirPath(self, path_dir, logger):
- if not os.path.exists(path_dir):
- makedir = []
- pathh = path_dir.split("/")
- for i in pathh:
- makedir.append(i)
- repath = "/".join(makedir)
- if not os.path.exists(repath):
- self._logger.debug("Dossier crée : {0}".format(repath))
- try:
- if len(repath) > 0:
- os.mkdir(repath)
- except Exception as err:
- self._logger.error("Directory error : {0}".format(err))
- self._logger.debug("Directory error : {0} {1} {2} {3} {4}".format(err, path_dir, repath, pathh, makedir))
- exit(1)
+ self._dir = directory
- def _getScriptCss(self, js, css, logger):
- try:
- page = requests.get(url)
- except Exception as err:
- self._logger.error("Connection error : {0}".format(err))
- exit(1)
- page_url = []
- if page.status_code == 200:
- soup = BeautifulSoup(page.text, self._parser)
- if js is True:
- script = soup.find_all("script")
- for anchor in script:
- src = anchor.get("src", "/")
- if src != "/":
- try:
- u = urlparse(url)
- o = urlparse(src)
- except Exception as err:
- self._logger.error("parsing error : {0}".format(err))
- exit(1)
- if o.netloc == "":
- o = o._replace(netloc=u.netloc)
- o = o._replace(scheme=u.scheme)
- page_url.append(o.geturl())
- if css is True:
- link = soup.find_all("link")
- for anchor in link:
- rel = anchor.get("rel")
- if rel[0] == "stylesheet":
- href = anchor.get("href", "/")
- if href != "/":
- try:
- u = urlparse(url)
- o = urlparse(href)
- except Exception as err:
- self._logger.error("parsing error : {0}".format(err))
- exit(1)
- if o.netloc == "":
- o = o._replace(netloc=u.netloc)
- o = o._replace(scheme=u.scheme)
- page_url.append(o.geturl())
- return page_url
+ # Public method
- def _getImg(self, webpage):
- page_img = []
- for i in webpage:
- try:
- page = requests.get(i)
- except Exception as err:
- self._logger.error("Connection error : {0}".format(err))
- exit(1)
- if page.status_code == 200:
- soup = BeautifulSoup(page.text, self._parser)
- img = soup.find_all("img")
- self._logger.info("image from page: {0} : ".format(i))
- for anchor in img:
- src = anchor.get("src", "/")
- if src != "/":
- if src not in page_img:
- self._logger.info("image: {0} : ".format(src))
- page_img.append(src)
- return page_img
+ # Download JS
- def _getUrlPage(self):
+ def downloadJs(self):
+ script = self._getScriptCss(True, False)
+ o = urlparse(self._url)
+ self._downloadPage(script, "{0}/{1}/{2}".format(self._dir, o.path, "dists/js"))
+
+ # Download CSS
+
+ def downloadCss(self):
+ css = self._getScriptCss(False, True)
+ o = urlparse(self._url)
+ self._downloadPage(script, "{0}/{1}/{2}".format(self._dir, o.path, "dists/css"))
+
+ # Download HTML
+
+ def downloadHTML(self, webpage):
+ self._downloadPage(webpage, self._dir)
+
+ # Download Image
+
+ def downloadImg(self, webpage):
+ page_src = self._getImg(webpage)
+ o = urlparse(self._url)
+ self._downloadPage(page_src, "{0}/{1}/{2}".format(self._dir, o.path, "img"))
+
+
+ # Get URL
+ def getUrlPage(self):
try:
page = requests.get(self._url)
except Exception as err:
@@ -151,6 +102,95 @@ class WPExport:
return webpage
+ # Private method
+ #
+ # Create path
+ def _mkdirPath(self, path_dir, logger):
+ if not os.path.exists(path_dir):
+ makedir = []
+ pathh = path_dir.split("/")
+ for i in pathh:
+ makedir.append(i)
+ repath = "/".join(makedir)
+ if not os.path.exists(repath):
+ self._logger.debug("Dossier crée : {0}".format(repath))
+ try:
+ if len(repath) > 0:
+ os.mkdir(repath)
+ except Exception as err:
+ self._logger.error("Directory error : {0}".format(err))
+ self._logger.debug("Directory error : {0} {1} {2} {3} {4}".format(err, path_dir, repath, pathh, makedir))
+ exit(1)
+
+
+ # Get Css and JS
+ def _getScriptCss(self, js, css):
+ try:
+ page = requests.get(url)
+ except Exception as err:
+ self._logger.error("Connection error : {0}".format(err))
+ exit(1)
+ page_url = []
+ if page.status_code == 200:
+ soup = BeautifulSoup(page.text, self._parser)
+ if js is True:
+ script = soup.find_all("script")
+ for anchor in script:
+ src = anchor.get("src", "/")
+ if src != "/":
+ try:
+ u = urlparse(url)
+ o = urlparse(src)
+ except Exception as err:
+ self._logger.error("parsing error : {0}".format(err))
+ exit(1)
+ if o.netloc == "":
+ o = o._replace(netloc=u.netloc)
+ o = o._replace(scheme=u.scheme)
+ page_url.append(o.geturl())
+ if css is True:
+ link = soup.find_all("link")
+ for anchor in link:
+ rel = anchor.get("rel")
+ if rel[0] == "stylesheet":
+ href = anchor.get("href", "/")
+ if href != "/":
+ try:
+ u = urlparse(url)
+ o = urlparse(href)
+ except Exception as err:
+ self._logger.error("parsing error : {0}".format(err))
+ exit(1)
+ if o.netloc == "":
+ o = o._replace(netloc=u.netloc)
+ o = o._replace(scheme=u.scheme)
+ page_url.append(o.geturl())
+ return page_url
+
+ # Get image
+
+ def _getImg(self, webpage):
+ page_img = []
+ for i in webpage:
+ try:
+ page = requests.get(i)
+ except Exception as err:
+ self._logger.error("Connection error : {0}".format(err))
+ exit(1)
+ if page.status_code == 200:
+ soup = BeautifulSoup(page.text, self._parser)
+ img = soup.find_all("img")
+ self._logger.info("image from page: {0} : ".format(i))
+ for anchor in img:
+ src = anchor.get("src", "/")
+ if src != "/":
+ if src not in page_img:
+ self._logger.info("image: {0} : ".format(src))
+ page_img.append(src)
+ return page_img
+
+
+ # Download page
def _downloadPage(self, webpage, backup_dir):
for i in range(0, len(webpage)):
@@ -178,66 +218,4 @@ class WPExport:
open(fileDownload, "wb").write(r.content)
except Exception as err:
self._logger.error("file error : {0}".format(err))
- exit(1)
-
-
-if __name__ == '__main__':
- parser = argparse.ArgumentParser()
- parser.add_argument("--url", help="canblog URL to be scraping", required=True)
- parser.add_argument("--dir",
- default="backup",
- help="backup file path")
- parser.add_argument("--debug", help="Verbosity", action="store_true")
- parser.add_argument("--logfile", help="Log file", default="")
- parser.add_argument("--no-css", help="No CSS", dest="css", action="store_true")
- parser.add_argument("--no-js", help="No JS", dest="js", action="store_true")
- parser.add_argument("--no-img", help="No img", dest="img", action="store_true")
- parser.add_argument("--no-html", help="No HTML", dest="html", action="store_true")
- parser.add_argument("--quiet", help="No console output", action="store_true")
- args = parser.parse_args()
- logger = logging.getLogger('web_scrap')
- formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
-
- if args.quiet is False:
- ch = logging.StreamHandler()
- if args.debug is True:
- logger.setLevel(logging.DEBUG)
- ch.setLevel(logging.DEBUG)
- else:
- logger.setLevel(logging.INFO)
- ch.setLevel(logging.INFO)
- ch.setFormatter(formatter)
- logger.addHandler(ch)
-
-
- if len(args.logfile) > 0:
- fileHandler = logging.FileHandler(args.logfile)
- if args.debug is True:
- fileHandler.setLevel(logging.DEBUG)
- else:
- fileHandler.setLevel(logging.INFO)
- fileHandler.setFormatter(formatter)
- logger.addHandler(fileHandler)
-
- try:
- o = urlparse(args.url)
- o = o._replace(scheme="https")
- url = o.geturl().replace(":///", "://")
- except Exception as err:
- logger.error("parsing error : {0}".format(err))
- if args.js is False:
- script = getScriptCss(url, True, False, logger)
- downloadPage(script, "{0}/{1}/{2}".format(args.dir, o.path, "dists/js"), logger)
-
- if args.css is False:
- css = getScriptCss(url, False, True, logger)
- downloadPage(css, "{0}/{1}/{2}".format(args.dir, o.path, "dists/css"), logger)
-
- if args.html is False or args.img is False:
- webpage = getUrlPage(url, logger)
- if args.html is False:
- downloadPage(webpage, args.dir, logger)
-
- if args.img is False:
- page_src = getImg(webpage, logger)
- downloadPage(page_src, "{0}/{1}/{2}".format(args.dir, o.path, "img"), logger)
+ exit(1)
\ No newline at end of file
diff --git a/import_export_canalblog.py b/import_export_canalblog.py
index cc19a06..b38d5be 100644
--- a/import_export_canalblog.py
+++ b/import_export_canalblog.py
@@ -18,6 +18,7 @@ if __name__ == '__main__':
import_parser.add_argument("--user", help="wordpress user", required=True)
import_parser.add_argument("--file", help="HTML file", default="")
import_parser.add_argument("--directory", help="HTML directory", default="")
+ import_parser.add_argument("--canalblog", help="URL Canalblog", default="")
import_parser.add_argument("--wordpress", help="URL Wordpress", required=True)
export_parser = subparsers.add_parser("export")
@@ -59,7 +60,7 @@ if __name__ == '__main__':
fileHandler.setFormatter(formatter)
logger.addHandler(fileHandler)
- if args.command == "export":
+ if args.command == "import":
password = getpass()
if len(password) == 0:
logger.error("No password error !!! ")
@@ -71,4 +72,27 @@ if __name__ == '__main__':
importWp.fromFile(args.file.split(","))
exit(0)
if len(args.directory) > 0:
- importWp.fromDirectory(args.directory)
\ No newline at end of file
+ importWp.fromDirectory(args.directory)
+ exit(0)
+ if args.command == "export":
+ try:
+ o = urlparse(args.url)
+ o = o._replace(scheme="https")
+ url = o.geturl().replace(":///", "://")
+ except Exception as err:
+ logger.error("parsing error : {0}".format(err))
+ exit(1)
+ exportWp = WPExport.WPExport(url, logger, args.parser, args.dir)
+ if args.js is False:
+ exportWp.downloadJs()
+
+ if args.css is False:
+ exportWp.downloadCss()
+
+ if args.html is False or args.img is False:
+ webpage = exportWp.getUrlPage()
+ if args.html is False:
+ exportWp.downloadHTML(webpage)
+
+ if args.img is False:
+ exportWp.downloadImg(webpage)
\ No newline at end of file
From 19c62f38d40b3d92e9ece6e6af5b380fe17c0bdf Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Sun, 9 Apr 2023 22:50:41 +0200
Subject: [PATCH 48/60] add exit
---
import_export_canalblog.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/import_export_canalblog.py b/import_export_canalblog.py
index b38d5be..edce4ab 100644
--- a/import_export_canalblog.py
+++ b/import_export_canalblog.py
@@ -95,4 +95,5 @@ if __name__ == '__main__':
exportWp.downloadHTML(webpage)
if args.img is False:
- exportWp.downloadImg(webpage)
\ No newline at end of file
+ exportWp.downloadImg(webpage)
+ exit(0)
\ No newline at end of file
From cd50e45493ae05b2846fb3fb2816524acf4c894f Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Sun, 9 Apr 2023 23:49:10 +0200
Subject: [PATCH 49/60] fix WPExport
---
WPExport.py | 14 +++++++-------
import_export_canalblog.py | 5 +++--
2 files changed, 10 insertions(+), 9 deletions(-)
diff --git a/WPExport.py b/WPExport.py
index 6db20ad..9657cf6 100644
--- a/WPExport.py
+++ b/WPExport.py
@@ -25,7 +25,7 @@ class WPExport:
def downloadCss(self):
css = self._getScriptCss(False, True)
o = urlparse(self._url)
- self._downloadPage(script, "{0}/{1}/{2}".format(self._dir, o.path, "dists/css"))
+ self._downloadPage(css, "{0}/{1}/{2}".format(self._dir, o.path, "dists/css"))
# Download HTML
@@ -79,9 +79,9 @@ class WPExport:
for j in range(1,int(number_lastpage)):
paging = j * 10
categorie = urlparse(i).path.split("/")
- url_paging = "{0}/archives/p{1}-10.html".format(url, paging)
+ url_paging = "{0}/archives/p{1}-10.html".format(self._url, paging)
if len(categorie) > 2:
- url_paging = "{0}/archives/{1}/p{2}-10.html".format(url, categorie[2], paging)
+ url_paging = "{0}/archives/{1}/p{2}-10.html".format(self._url, categorie[2], paging)
self._logger.info(url_paging)
if url_paging not in webpage:
webpage.append(url_paging)
@@ -105,7 +105,7 @@ class WPExport:
# Private method
#
# Create path
- def _mkdirPath(self, path_dir, logger):
+ def _mkdirPath(self, path_dir):
if not os.path.exists(path_dir):
makedir = []
pathh = path_dir.split("/")
@@ -126,7 +126,7 @@ class WPExport:
# Get Css and JS
def _getScriptCss(self, js, css):
try:
- page = requests.get(url)
+ page = requests.get(self._url)
except Exception as err:
self._logger.error("Connection error : {0}".format(err))
exit(1)
@@ -139,7 +139,7 @@ class WPExport:
src = anchor.get("src", "/")
if src != "/":
try:
- u = urlparse(url)
+ u = urlparse(self._url)
o = urlparse(src)
except Exception as err:
self._logger.error("parsing error : {0}".format(err))
@@ -156,7 +156,7 @@ class WPExport:
href = anchor.get("href", "/")
if href != "/":
try:
- u = urlparse(url)
+ u = urlparse(self._url)
o = urlparse(href)
except Exception as err:
self._logger.error("parsing error : {0}".format(err))
diff --git a/import_export_canalblog.py b/import_export_canalblog.py
index edce4ab..6b8325d 100644
--- a/import_export_canalblog.py
+++ b/import_export_canalblog.py
@@ -1,8 +1,9 @@
#!/usr/bin/python3
from requests.auth import HTTPBasicAuth
from getpass import getpass
+from urllib.parse import urlparse
import argparse, logging
-import WPImport
+import WPImport, WPExport
if __name__ == '__main__':
@@ -82,7 +83,7 @@ if __name__ == '__main__':
except Exception as err:
logger.error("parsing error : {0}".format(err))
exit(1)
- exportWp = WPExport.WPExport(url, logger, args.parser, args.dir)
+ exportWp = WPExport.WPExport(url, logger, args.parser, args.directory)
if args.js is False:
exportWp.downloadJs()
From e74dfc2b73438eebaf64385409805344ec40793a Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Mon, 10 Apr 2023 00:00:01 +0200
Subject: [PATCH 50/60] add import from url
---
WPImport.py | 12 ++++++++++++
import_export_canalblog.py | 17 +++++++++++++++--
2 files changed, 27 insertions(+), 2 deletions(-)
diff --git a/WPImport.py b/WPImport.py
index 92f6681..d17adce 100644
--- a/WPImport.py
+++ b/WPImport.py
@@ -14,6 +14,18 @@ class WPimport:
# Public method
+ def fromUrl(self, webpage):
+ for page in webpage:
+ r = requests.get(page)
+ if r.status_code == 200:
+ soup = BeautifulSoup(r.content, self._parser)
+ articlebody = soup.find_all("div", class_="articlebody")
+ if len(articlebody) > 0:
+ self._addOrUpdatePost(soup)
+ else:
+ self._addOrUpdateFeaturedMedia(soup)
+
+
def fromDirectory(self, directory):
directory = "{0}/archives".format(directory)
directories = self._getDirectories([], "{0}".format(directory))
diff --git a/import_export_canalblog.py b/import_export_canalblog.py
index 6b8325d..96314ec 100644
--- a/import_export_canalblog.py
+++ b/import_export_canalblog.py
@@ -37,7 +37,7 @@ if __name__ == '__main__':
args = parser.parse_args()
- logger = logging.getLogger('insert wordpress')
+ logger = logging.getLogger('import export canalblog')
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
if args.quiet is False:
@@ -74,7 +74,20 @@ if __name__ == '__main__':
exit(0)
if len(args.directory) > 0:
importWp.fromDirectory(args.directory)
- exit(0)
+ exit(0)
+ if len(args.canalblog) > 0:
+ try:
+ o = urlparse(args.canalblog)
+ o = o._replace(scheme="https")
+ url = o.geturl().replace(":///", "://")
+ except Exception as err:
+ logger.error("parsing error : {0}".format(err))
+ exit(1)
+ exportWp = WPExport.WPExport(url, logger, args.parser, args.directory)
+ webpage = exportWp.getUrlPage()
+ importWp.fromUrl(webpage)
+
+
if args.command == "export":
try:
o = urlparse(args.url)
From ed78f22f2e920c1fdbda967a93f817a7bc5781e9 Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Mon, 10 Apr 2023 11:05:32 +0200
Subject: [PATCH 51/60] fix WPImport from URL
---
WPImport.py | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/WPImport.py b/WPImport.py
index d17adce..bb41e9c 100644
--- a/WPImport.py
+++ b/WPImport.py
@@ -15,9 +15,10 @@ class WPimport:
# Public method
def fromUrl(self, webpage):
- for page in webpage:
- r = requests.get(page)
+ for i in range(0, len(webpage)):
+ r = requests.get(webpage[i])
if r.status_code == 200:
+ self._logger.info("({0}/{1} : Page en cours d'import : {2}".format(i+1, len(webpage), webpage[i]))
soup = BeautifulSoup(r.content, self._parser)
articlebody = soup.find_all("div", class_="articlebody")
if len(articlebody) > 0:
From 4ddc4a7cd3358774af22942fdb38ece0e0c11f23 Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Mon, 10 Apr 2023 15:41:14 +0200
Subject: [PATCH 52/60] rm web_scrap + add set url + add backup1 to gitignore
---
.gitignore | 1 +
WPExport.py | 5 ++
WPImport.py | 3 +
web_scrap.py | 241 ---------------------------------------------------
4 files changed, 9 insertions(+), 241 deletions(-)
delete mode 100644 web_scrap.py
diff --git a/.gitignore b/.gitignore
index d9171fe..6280820 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,3 @@
backup/
+backup1/
web_scrap.log
diff --git a/WPExport.py b/WPExport.py
index 9657cf6..c5b6be9 100644
--- a/WPExport.py
+++ b/WPExport.py
@@ -13,6 +13,11 @@ class WPExport:
# Public method
+ # Set URL
+
+ def setUrl(self, url):
+ self._url = url
+
# Download JS
def downloadJs(self):
diff --git a/WPImport.py b/WPImport.py
index bb41e9c..339a8c0 100644
--- a/WPImport.py
+++ b/WPImport.py
@@ -14,6 +14,9 @@ class WPimport:
# Public method
+ def setUrl(self, wordpress):
+ self._wordpress = wordpress
+
def fromUrl(self, webpage):
for i in range(0, len(webpage)):
r = requests.get(webpage[i])
diff --git a/web_scrap.py b/web_scrap.py
deleted file mode 100644
index adf2988..0000000
--- a/web_scrap.py
+++ /dev/null
@@ -1,241 +0,0 @@
-#!/usr/bin/python3
-from bs4 import BeautifulSoup
-from urllib.parse import urlparse
-import requests, os, argparse, logging
-
-def mkdirPath(path_dir, logger):
- if not os.path.exists(path_dir):
- makedir = []
- pathh = path_dir.split("/")
- for i in pathh:
- makedir.append(i)
- repath = "/".join(makedir)
- if not os.path.exists(repath):
- logger.debug("Dossier crée : {0}".format(repath))
- try:
- if len(repath) > 0:
- os.mkdir(repath)
- except Exception as err:
- logger.error("Directory error : {0}".format(err))
- logger.debug("Directory error : {0} {1} {2} {3} {4}".format(err, path_dir, repath, pathh, makedir))
- exit(1)
-
-
-def getScriptCss(url, js, css, logger):
- try:
- page = requests.get(url)
- except Exception as err:
- logger.error("Connection error : {0}".format(err))
- exit(1)
- page_url = []
- if page.status_code == 200:
- soup = BeautifulSoup(page.text, 'html.parser')
- if js is True:
- script = soup.find_all("script")
- for anchor in script:
- src = anchor.get("src", "/")
- if src != "/":
- try:
- u = urlparse(url)
- o = urlparse(src)
- except Exception as err:
- logger.error("parsing error : {0}".format(err))
- exit(1)
- if o.netloc == "":
- o = o._replace(netloc=u.netloc)
- o = o._replace(scheme=u.scheme)
- page_url.append(o.geturl())
- if css is True:
- link = soup.find_all("link")
- for anchor in link:
- rel = anchor.get("rel")
- if rel[0] == "stylesheet":
- href = anchor.get("href", "/")
- if href != "/":
- try:
- u = urlparse(url)
- o = urlparse(href)
- except Exception as err:
- logger.error("parsing error : {0}".format(err))
- exit(1)
- if o.netloc == "":
- o = o._replace(netloc=u.netloc)
- o = o._replace(scheme=u.scheme)
- page_url.append(o.geturl())
-
-
- return page_url
-
-def getImg(webpage, logger):
- page_img = []
- for i in webpage:
- try:
- page = requests.get(i)
- except Exception as err:
- logger.error("Connection error : {0}".format(err))
- exit(1)
- if page.status_code == 200:
- soup = BeautifulSoup(page.text, 'html.parser')
- img = soup.find_all("img")
- logger.info("image from page: {0} : ".format(i))
- for anchor in img:
- src = anchor.get("src", "/")
- if src != "/":
- if src not in page_img:
- logger.info("image: {0} : ".format(src))
- page_img.append(src)
-
-
- return page_img
-
-def getUrlPage(url, logger):
- try:
- page = requests.get(url)
- except Exception as err:
- logger.error("Connection error : {0}".format(err))
- exit(1)
- page_url = []
- if page.status_code == 200:
- soup = BeautifulSoup(page.text, 'html.parser')
- ul = soup.find_all("ul", id="listsmooth")
- for anchor in ul[0].find_all("a"):
- href = anchor.get('href', '/')
- if href != "#":
- page_url.append(href)
-
- webpage = []
- for i in page_url:
- try:
- page = requests.get(i)
- except Exception as err:
- logger.error("Connection error : {0}".format(err))
- exit(1)
- if page.status_code == 200:
- logger.info("page : {0}".format(i))
- if i not in webpage:
- webpage.append(i)
- soup = BeautifulSoup(page.text, 'html.parser')
- class_div = pagingfirstline = soup.find_all("div", class_="pagingfirstline")
- if len(class_div) > 0:
- pagingfirstline = class_div[0].find_all("a")
- if len(pagingfirstline) > 1:
- lastpage = pagingfirstline[len(pagingfirstline)-1].get("href", "/")
- element_lastpage = lastpage.split("/")[len(lastpage.split("/"))-1]
- number_page = element_lastpage.split("-")[0].split("p")[1]
- number_lastpage = int(number_page) / 10
- for j in range(1,int(number_lastpage)):
- paging = j * 10
- categorie = urlparse(i).path.split("/")
- url_paging = "{0}/archives/p{1}-10.html".format(url, paging)
- if len(categorie) > 2:
- url_paging = "{0}/archives/{1}/p{2}-10.html".format(url, categorie[2], paging)
- logger.info(url_paging)
- if url_paging not in webpage:
- webpage.append(url_paging)
- page = requests.get(url_paging)
- if page.status_code == 200:
- soup = BeautifulSoup(page.text, 'html.parser')
- h2 = soup.find_all("h2")
- for title in h2:
- href = title.find_all("a")[0].get("href", "/")
- if href not in webpage:
- try:
- o = urlparse(href)
- o = o._replace(scheme="https").geturl()
- except Exception as err:
- logger.error("parsing error : {0}".format(err))
- exit(1)
- webpage.append(o)
- return webpage
-
-
-def downloadPage(webpage, backup_dir, logger):
-
- for i in range(0, len(webpage)):
- try:
- o = urlparse(webpage[i])
- except Exception as err:
- logger.error("parsing error : {0}".format(err))
- exit(1)
- path_web = o.path.split("/")
- filePageWeb = path_web[len(path_web)-1]
- path_web.pop(len(path_web)-1)
- dir_page_web = "/".join(path_web)
- mkdirPath("{0}/{1}/{2}".format(backup_dir, o.netloc, dir_page_web), logger)
- try:
- r = requests.get(webpage[i])
- except Exception as err:
- logger.error("Connection error : {0}".format(err))
- exit(1)
- if r.status_code == 200:
- fileDownload = "{0}/{1}/index.html".format(backup_dir, o.netloc)
- if len(dir_page_web) > 0 and len(filePageWeb) > 0:
- fileDownload = "{0}/{1}{2}/{3}".format(backup_dir, o.netloc, dir_page_web, filePageWeb)
- logger.info("{0}/{1} : {2}".format(i+1, len(webpage), fileDownload))
- try:
- open(fileDownload, "wb").write(r.content)
- except Exception as err:
- logger.error("file error : {0}".format(err))
- exit(1)
-
-
-if __name__ == '__main__':
- parser = argparse.ArgumentParser()
- parser.add_argument("--url", help="canblog URL to be scraping", required=True)
- parser.add_argument("--dir",
- default="backup",
- help="backup file path")
- parser.add_argument("--debug", help="Verbosity", action="store_true")
- parser.add_argument("--logfile", help="Log file", default="")
- parser.add_argument("--no-css", help="No CSS", dest="css", action="store_true")
- parser.add_argument("--no-js", help="No JS", dest="js", action="store_true")
- parser.add_argument("--no-img", help="No img", dest="img", action="store_true")
- parser.add_argument("--no-html", help="No HTML", dest="html", action="store_true")
- parser.add_argument("--quiet", help="No console output", action="store_true")
- args = parser.parse_args()
- logger = logging.getLogger('web_scrap')
- formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
-
- if args.quiet is False:
- ch = logging.StreamHandler()
- if args.debug is True:
- logger.setLevel(logging.DEBUG)
- ch.setLevel(logging.DEBUG)
- else:
- logger.setLevel(logging.INFO)
- ch.setLevel(logging.INFO)
- ch.setFormatter(formatter)
- logger.addHandler(ch)
-
-
- if len(args.logfile) > 0:
- fileHandler = logging.FileHandler(args.logfile)
- if args.debug is True:
- fileHandler.setLevel(logging.DEBUG)
- else:
- fileHandler.setLevel(logging.INFO)
- fileHandler.setFormatter(formatter)
- logger.addHandler(fileHandler)
-
- try:
- o = urlparse(args.url)
- o = o._replace(scheme="https")
- url = o.geturl().replace(":///", "://")
- except Exception as err:
- logger.error("parsing error : {0}".format(err))
- if args.js is False:
- script = getScriptCss(url, True, False, logger)
- downloadPage(script, "{0}/{1}/{2}".format(args.dir, o.path, "dists/js"), logger)
-
- if args.css is False:
- css = getScriptCss(url, False, True, logger)
- downloadPage(css, "{0}/{1}/{2}".format(args.dir, o.path, "dists/css"), logger)
-
- if args.html is False or args.img is False:
- webpage = getUrlPage(url, logger)
- if args.html is False:
- downloadPage(webpage, args.dir, logger)
-
- if args.img is False:
- page_src = getImg(webpage, logger)
- downloadPage(page_src, "{0}/{1}/{2}".format(args.dir, o.path, "img"), logger)
From aa5c8893ec8051344ea5d5fa162bf99115e93299 Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Mon, 10 Apr 2023 16:02:40 +0200
Subject: [PATCH 53/60] loop for url
---
import_export_canalblog.py | 56 +++++++++++++++++++++-----------------
1 file changed, 31 insertions(+), 25 deletions(-)
diff --git a/import_export_canalblog.py b/import_export_canalblog.py
index 96314ec..817c905 100644
--- a/import_export_canalblog.py
+++ b/import_export_canalblog.py
@@ -76,38 +76,44 @@ if __name__ == '__main__':
importWp.fromDirectory(args.directory)
exit(0)
if len(args.canalblog) > 0:
+ exportWp = WPExport.WPExport("", logger, args.parser, args.directory)
+ canalblog = args.canalblog.split(",")
+ for canal in canalblog:
+ try:
+ o = urlparse(canal)
+ o = o._replace(scheme="https")
+ url = o.geturl().replace(":///", "://")
+ except Exception as err:
+ logger.error("parsing error : {0}".format(err))
+ exit(1)
+ exportWp.setUrl(url)
+ webpage = exportWp.getUrlPage()
+ importWp.fromUrl(webpage)
+
+
+ if args.command == "export":
+ canalblog = args.url.split(",")
+ exportWp = WPExport.WPExport("", logger, args.parser, args.directory)
+ for canal in canalblog:
try:
- o = urlparse(args.canalblog)
+ o = urlparse(canal)
o = o._replace(scheme="https")
url = o.geturl().replace(":///", "://")
except Exception as err:
logger.error("parsing error : {0}".format(err))
exit(1)
- exportWp = WPExport.WPExport(url, logger, args.parser, args.directory)
- webpage = exportWp.getUrlPage()
- importWp.fromUrl(webpage)
-
-
- if args.command == "export":
- try:
- o = urlparse(args.url)
- o = o._replace(scheme="https")
- url = o.geturl().replace(":///", "://")
- except Exception as err:
- logger.error("parsing error : {0}".format(err))
- exit(1)
- exportWp = WPExport.WPExport(url, logger, args.parser, args.directory)
- if args.js is False:
- exportWp.downloadJs()
+ exportWp.setUrl(url)
+ if args.js is False:
+ exportWp.downloadJs()
- if args.css is False:
- exportWp.downloadCss()
+ if args.css is False:
+ exportWp.downloadCss()
- if args.html is False or args.img is False:
- webpage = exportWp.getUrlPage()
- if args.html is False:
- exportWp.downloadHTML(webpage)
+ if args.html is False or args.img is False:
+ webpage = exportWp.getUrlPage()
+ if args.html is False:
+ exportWp.downloadHTML(webpage)
- if args.img is False:
- exportWp.downloadImg(webpage)
+ if args.img is False:
+ exportWp.downloadImg(webpage)
exit(0)
\ No newline at end of file
From 48e77084e83e4e703678227bbfb30a88b0fa0b39 Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Mon, 10 Apr 2023 16:07:14 +0200
Subject: [PATCH 54/60] remove newline
---
WPImport.py | 6 +-----
1 file changed, 1 insertion(+), 5 deletions(-)
diff --git a/WPImport.py b/WPImport.py
index 339a8c0..9e2cec6 100644
--- a/WPImport.py
+++ b/WPImport.py
@@ -278,8 +278,4 @@ class WPimport:
page = requests.post("http://{0}/wp-json/wp/v2/comments".format(self._wordpress), auth=self._basic, data=data)
if page.status_code == 201:
self._logger.info("Commentaire ajoute pour {0}".format(result["title"]["raw"]))
- self._linkImgPost(result["title"]["raw"], list_img, result["id"])
-
-
-
-
+ self._linkImgPost(result["title"]["raw"], list_img, result["id"])
\ No newline at end of file
From 7c75116c5b9a8cb6725a581b18db3b0b9d02eacc Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Mon, 10 Apr 2023 16:15:13 +0200
Subject: [PATCH 55/60] add url list
---
import_export_canalblog.py | 11 ++++++++---
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/import_export_canalblog.py b/import_export_canalblog.py
index 817c905..3a6c748 100644
--- a/import_export_canalblog.py
+++ b/import_export_canalblog.py
@@ -68,12 +68,17 @@ if __name__ == '__main__':
exit(1)
basic = HTTPBasicAuth(args.user, password)
- importWp = WPImport.WPimport(basic, args.wordpress, logger, args.parser)
+ wordpress = args.wordpress.split(",")
+ importWp = WPImport.WPimport(basic, "", logger, args.parser)
if len(args.file) > 0:
- importWp.fromFile(args.file.split(","))
+ for i in wordpress:
+ importWp.setUrl(i)
+ importWp.fromFile(args.file.split(","))
exit(0)
if len(args.directory) > 0:
- importWp.fromDirectory(args.directory)
+ for i in wordpress:
+ importWp.setUrl(i)
+ importWp.fromDirectory(args.directory)
exit(0)
if len(args.canalblog) > 0:
exportWp = WPExport.WPExport("", logger, args.parser, args.directory)
From 05a3a28c6fd12724985b1c24cde625d6e18cd21d Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Mon, 10 Apr 2023 16:36:49 +0200
Subject: [PATCH 56/60] add serial for url
---
import_export_canalblog.py | 60 +++++++++++++++++++++++++++++---------
1 file changed, 47 insertions(+), 13 deletions(-)
diff --git a/import_export_canalblog.py b/import_export_canalblog.py
index 3a6c748..785fbfd 100644
--- a/import_export_canalblog.py
+++ b/import_export_canalblog.py
@@ -21,6 +21,8 @@ if __name__ == '__main__':
import_parser.add_argument("--directory", help="HTML directory", default="")
import_parser.add_argument("--canalblog", help="URL Canalblog", default="")
import_parser.add_argument("--wordpress", help="URL Wordpress", required=True)
+ import_parser.add_argument("--serial", help="Serial execution", action="store_true")
+
export_parser = subparsers.add_parser("export")
@@ -76,24 +78,56 @@ if __name__ == '__main__':
importWp.fromFile(args.file.split(","))
exit(0)
if len(args.directory) > 0:
- for i in wordpress:
- importWp.setUrl(i)
- importWp.fromDirectory(args.directory)
+ directory = args.directory.split(",")
+ if args.serial is False:
+ for i in wordpress:
+ importWp.setUrl(i)
+ for j in directory:
+ importWp.fromDirectory(j)
+ else:
+ if len(directory) != len(wordpress):
+ logger.error("ERREUR : Le nombre de dossier n'est pas equivalent au nombre d'URL wordpress")
+ exit(1)
+ for i in range(0, len(wordpress)-1):
+ importWp.setUrl(wordpress[i])
+ importWp.fromDirectory(directory[i])
exit(0)
if len(args.canalblog) > 0:
exportWp = WPExport.WPExport("", logger, args.parser, args.directory)
canalblog = args.canalblog.split(",")
- for canal in canalblog:
- try:
- o = urlparse(canal)
- o = o._replace(scheme="https")
- url = o.geturl().replace(":///", "://")
- except Exception as err:
- logger.error("parsing error : {0}".format(err))
+ wordpress = args.wordpress.split(",")
+
+ if serial is False:
+ for canal in canalblog:
+ try:
+ o = urlparse(canal)
+ o = o._replace(scheme="https")
+ url = o.geturl().replace(":///", "://")
+ except Exception as err:
+ logger.error("parsing error : {0}".format(err))
+ exit(1)
+ exportWp.setUrl(url)
+ webpage = exportWp.getUrlPage()
+ for j in wordpress:
+ importWp.setUrl(j)
+ importWp.fromUrl(webpage)
+ else:
+ if len(canalblog) != len(wordpress):
+ logger.error("ERREUR : Le nombre de dossier n'est pas equivalent au nombre d'URL wordpress")
exit(1)
- exportWp.setUrl(url)
- webpage = exportWp.getUrlPage()
- importWp.fromUrl(webpage)
+ for i in range(0, len(canalblog)-1):
+ try:
+ o = urlparse(canalblog[i])
+ o = o._replace(scheme="https")
+ url = o.geturl().replace(":///", "://")
+ except Exception as err:
+ logger.error("parsing error : {0}".format(err))
+ exit(1)
+ exportWp.setUrl(url)
+ webpage = exportWp.getUrlPage()
+ importWp.setUrl(wordpress[i])
+ importWp.fromUrl(webpage)
+
if args.command == "export":
From 7848968fa133f583d40fe523a9e2e8d2fa9312a5 Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Tue, 11 Apr 2023 22:15:36 +0200
Subject: [PATCH 57/60] Organise classes into a lib folder
---
.gitignore | 1 +
import_export_canalblog.py | 11 ++++++-----
WPExport.py => lib/WPExport.py | 0
WPImport.py => lib/WPImport.py | 0
lib/__init__.py | 0
5 files changed, 7 insertions(+), 5 deletions(-)
rename WPExport.py => lib/WPExport.py (100%)
rename WPImport.py => lib/WPImport.py (100%)
create mode 100644 lib/__init__.py
diff --git a/.gitignore b/.gitignore
index 6280820..1dfd775 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,4 @@
backup/
backup1/
web_scrap.log
+__pycache__/
diff --git a/import_export_canalblog.py b/import_export_canalblog.py
index 785fbfd..6470699 100644
--- a/import_export_canalblog.py
+++ b/import_export_canalblog.py
@@ -3,7 +3,8 @@ from requests.auth import HTTPBasicAuth
from getpass import getpass
from urllib.parse import urlparse
import argparse, logging
-import WPImport, WPExport
+from lib.WPImport import WPimport
+from lib.WPExport import WPExport
if __name__ == '__main__':
@@ -71,7 +72,7 @@ if __name__ == '__main__':
basic = HTTPBasicAuth(args.user, password)
wordpress = args.wordpress.split(",")
- importWp = WPImport.WPimport(basic, "", logger, args.parser)
+ importWp = WPimport(basic, "", logger, args.parser)
if len(args.file) > 0:
for i in wordpress:
importWp.setUrl(i)
@@ -93,11 +94,11 @@ if __name__ == '__main__':
importWp.fromDirectory(directory[i])
exit(0)
if len(args.canalblog) > 0:
- exportWp = WPExport.WPExport("", logger, args.parser, args.directory)
+ exportWp = WPExport("", logger, args.parser, args.directory)
canalblog = args.canalblog.split(",")
wordpress = args.wordpress.split(",")
- if serial is False:
+ if args.serial is False:
for canal in canalblog:
try:
o = urlparse(canal)
@@ -132,7 +133,7 @@ if __name__ == '__main__':
if args.command == "export":
canalblog = args.url.split(",")
- exportWp = WPExport.WPExport("", logger, args.parser, args.directory)
+ exportWp = WPExport("", logger, args.parser, args.directory)
for canal in canalblog:
try:
o = urlparse(canal)
diff --git a/WPExport.py b/lib/WPExport.py
similarity index 100%
rename from WPExport.py
rename to lib/WPExport.py
diff --git a/WPImport.py b/lib/WPImport.py
similarity index 100%
rename from WPImport.py
rename to lib/WPImport.py
diff --git a/lib/__init__.py b/lib/__init__.py
new file mode 100644
index 0000000..e69de29
From a856311f044ce6ab3fd69d887ffdb6540373c25b Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Tue, 11 Apr 2023 22:30:00 +0200
Subject: [PATCH 58/60] add method for comment
---
lib/WPImport.py | 15 ++++++++++-----
1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/lib/WPImport.py b/lib/WPImport.py
index 9e2cec6..f5d68cb 100644
--- a/lib/WPImport.py
+++ b/lib/WPImport.py
@@ -141,6 +141,15 @@ class WPimport:
media["id"] = res["id"]
media["rendered"] = res["guid"]["rendered"]
return media
+
+ ## Add or update comment
+
+ def _addOrUpdateComment(self, post, comment):
+ for i in comment:
+ data = {"post": post, "content": i["content"], "date": i["date"], "author_name": i["author"]}
+ page = requests.post("http://{0}/wp-json/wp/v2/comments".format(self._wordpress), auth=self._basic, data=data)
+ if page.status_code == 201:
+ self._logger.info("Commentaire ajoute pour {0}".format(result["title"]["raw"]))
## Add or Update post
@@ -273,9 +282,5 @@ class WPimport:
if page.status_code == 201:
result = page.json()
self._logger.info("Article ajoute : {0}".format(result["title"]["raw"]))
- for i in comment_post:
- data = {"post": result["id"], "content": i["content"], "date": i["date"], "author_name": i["author"]}
- page = requests.post("http://{0}/wp-json/wp/v2/comments".format(self._wordpress), auth=self._basic, data=data)
- if page.status_code == 201:
- self._logger.info("Commentaire ajoute pour {0}".format(result["title"]["raw"]))
+ self._addOrUpdateComment(result["id"], comment_post)
self._linkImgPost(result["title"]["raw"], list_img, result["id"])
\ No newline at end of file
From 335266e1adff73b4387a3214f99daeb730243804 Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Tue, 11 Apr 2023 23:26:40 +0200
Subject: [PATCH 59/60] update comment
---
lib/WPImport.py | 30 +++++++++++++++++++++++-------
1 file changed, 23 insertions(+), 7 deletions(-)
diff --git a/lib/WPImport.py b/lib/WPImport.py
index f5d68cb..6987877 100644
--- a/lib/WPImport.py
+++ b/lib/WPImport.py
@@ -144,12 +144,27 @@ class WPimport:
## Add or update comment
- def _addOrUpdateComment(self, post, comment):
- for i in comment:
- data = {"post": post, "content": i["content"], "date": i["date"], "author_name": i["author"]}
- page = requests.post("http://{0}/wp-json/wp/v2/comments".format(self._wordpress), auth=self._basic, data=data)
- if page.status_code == 201:
- self._logger.info("Commentaire ajoute pour {0}".format(result["title"]["raw"]))
+ def _addOrUpdateComment(self, post, comment, title):
+ params = {"post": post}
+ block = True
+ page = requests.get("http://{0}/wp-json/wp/v2/comments".format(self._wordpress), auth=self._basic, params=params)
+ if page.status_code == 200:
+ result = page.json()
+ for i in comment:
+ comment_exist = False
+ for j in result:
+ if i["author"] == j["author_name"] and i["date"] == j["date"]:
+ comment_exist = True
+ id_comment = j["id"]
+ data = {"post": post, "content": i["content"], "date": i["date"], "author_name": i["author"]}
+ if comment_exist is True:
+ page = page = requests.post("http://{0}/wp-json/wp/v2/comments/{1}".format(self._wordpress, id_comment), auth=self._basic, data=data)
+ if page.status_code == 200:
+ self._logger.info("Commentaire mise à jour pour {0}".format(title))
+ else:
+ page = requests.post("http://{0}/wp-json/wp/v2/comments".format(self._wordpress), auth=self._basic, data=data)
+ if page.status_code == 201:
+ self._logger.info("Commentaire ajoute pour {0}".format(title))
## Add or Update post
@@ -273,6 +288,7 @@ class WPimport:
if page.status_code == 200:
result = page.json()
self._logger.info("Article mis à jour : {0}".format(result["title"]["raw"]))
+ self._addOrUpdateComment(result["id"], comment_post, result["title"]["raw"])
self._linkImgPost(result["title"]["raw"], list_img, result["id"])
@@ -282,5 +298,5 @@ class WPimport:
if page.status_code == 201:
result = page.json()
self._logger.info("Article ajoute : {0}".format(result["title"]["raw"]))
- self._addOrUpdateComment(result["id"], comment_post)
+ self._addOrUpdateComment(result["id"], comment_post, result["title"]["raw"])
self._linkImgPost(result["title"]["raw"], list_img, result["id"])
\ No newline at end of file
From 76d27718860afaf317304b975fb4b7ac74122767 Mon Sep 17 00:00:00 2001
From: Valentin CZERYBA
Date: Tue, 11 Apr 2023 23:27:41 +0200
Subject: [PATCH 60/60] remove useless newlines
---
lib/WPImport.py | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/lib/WPImport.py b/lib/WPImport.py
index 6987877..60e8cbc 100644
--- a/lib/WPImport.py
+++ b/lib/WPImport.py
@@ -290,9 +290,7 @@ class WPimport:
self._logger.info("Article mis à jour : {0}".format(result["title"]["raw"]))
self._addOrUpdateComment(result["id"], comment_post, result["title"]["raw"])
self._linkImgPost(result["title"]["raw"], list_img, result["id"])
-
-
-
+
if page_exist == False:
page = requests.post("http://{0}/wp-json/wp/v2/posts".format(self._wordpress), auth=self._basic, headers=headers, data=json.dumps(data))
if page.status_code == 201: