create function for every task
parent 4de811c607
commit a3aceccba7
web_scrap.py (106 changed lines)
@@ -19,53 +19,71 @@ def mkdir_path(path_dir):
         if not os.path.exists(repath):
             os.mkdir(repath)
 
 mkdir_path(BACKUP_DIR)
 
 
-page = requests.get("https://{0}".format(URL))
-
-page_url = []
-if page.status_code == 200:
-    soup = BeautifulSoup(page.text, 'html.parser')
-    ul = soup.find_all("ul", id="listsmooth")
-    for anchor in ul[0].find_all("a"):
-        href = anchor.get('href', '/')
-        if href != "#":
-            page_url.append(href)
-
-webpage = []
-for i in page_url:
-    page = requests.get(i)
+def getUrlPage(url):
+    print(url)
+    page = requests.get(url)
+    page_url = []
     if page.status_code == 200:
-        print("page : {0}".format(i))
         soup = BeautifulSoup(page.text, 'html.parser')
-        class_div = pagingfirstline = soup.find_all("div", class_="pagingfirstline")
-        if len(class_div) > 0:
-            pagingfirstline = class_div[0].find_all("a")
-            if len(pagingfirstline) > 1:
-                lastpage = pagingfirstline[len(pagingfirstline)-1].get("href", "/")
-                element_lastpage = lastpage.split("/")[len(lastpage.split("/"))-1]
-                number_page = element_lastpage.split("-")[0].split("p")[1]
-                number_lastpage = int(number_page) / 10
-                for j in range(1,int(number_lastpage)):
-                    paging = j * 10
-                    categorie = urlparse(i).path.split("/")
-                    url_paging = "https://{0}/archives/p{1}-10.html".format(URL, paging)
-                    if len(categorie) > 2:
-                        url_paging = "https://{0}/archives/{1}/p{2}-10.html".format(URL, categorie[2], paging)
-                    print(url_paging)
-                    page = requests.get(url_paging)
-                    if page.status_code == 200:
-                        soup = BeautifulSoup(page.text, 'html.parser')
-                        h2 = soup.find_all("h2")
-                        for title in h2:
-                            href = title.find_all("a")[0].get("href", "/")
-                            if href not in webpage:
-                                webpage.append(href)
+        ul = soup.find_all("ul", id="listsmooth")
+        for anchor in ul[0].find_all("a"):
+            href = anchor.get('href', '/')
+            if href != "#":
+                page_url.append(href)
 
-for i in webpage:
-    o = urlparse(i)
-    path_web = o.path.split("/")
-    path_web.pop(len(path_web)-1)
-    dir_page_web = "/".join(path_web)
-    mkdir_path("{0}/{1}".format(BACKUP_DIR, dir_page_web))
+    webpage = []
+    for i in page_url:
+        page = requests.get(i)
+        if page.status_code == 200:
+            print("page : {0}".format(i))
+            if i not in webpage:
+                webpage.append(i)
+            soup = BeautifulSoup(page.text, 'html.parser')
+            class_div = pagingfirstline = soup.find_all("div", class_="pagingfirstline")
+            if len(class_div) > 0:
+                pagingfirstline = class_div[0].find_all("a")
+                if len(pagingfirstline) > 1:
+                    lastpage = pagingfirstline[len(pagingfirstline)-1].get("href", "/")
+                    element_lastpage = lastpage.split("/")[len(lastpage.split("/"))-1]
+                    number_page = element_lastpage.split("-")[0].split("p")[1]
+                    number_lastpage = int(number_page) / 10
+                    for j in range(1,int(number_lastpage)):
+                        paging = j * 10
+                        categorie = urlparse(i).path.split("/")
+                        url_paging = "{0}/archives/p{1}-10.html".format(url, paging)
+                        if len(categorie) > 2:
+                            url_paging = "{0}/archives/{1}/p{2}-10.html".format(url, categorie[2], paging)
+                        print(url_paging)
+                        if url_paging not in webpage:
+                            webpage.append(url_paging)
+                        page = requests.get(url_paging)
+                        if page.status_code == 200:
+                            soup = BeautifulSoup(page.text, 'html.parser')
+                            h2 = soup.find_all("h2")
+                            for title in h2:
+                                href = title.find_all("a")[0].get("href", "/")
+                                if href not in webpage:
+                                    o = urlparse(href)
+                                    o = o._replace(scheme="https").geturl()
+                                    webpage.append(o)
+    return webpage
+
+
+def downloadPage(url):
+    o = urlparse(url)
+    o = o._replace(scheme="https")
+    o = o._replace(fragment="")
+    webpage = getUrlPage(o.geturl().replace(":///", "://"))
+    for i in webpage:
+        o = urlparse(i)
+        path_web = o.path.split("/")
+        path_web.pop(len(path_web)-1)
+        dir_page_web = "/".join(path_web)
+        mkdir_path("{0}/{1}".format(BACKUP_DIR, dir_page_web))
+
+
+if __name__ == '__main__':
+    downloadPage(URL)
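Reading note (not part of the commit): the pagination arithmetic in getUrlPage can be followed on a sample last-pager link such as .../archives/p150-10.html. There element_lastpage is "p150-10.html", number_page is "150", number_lastpage is 15.0, and the loop over range(1, 15) requests the offset pages p10-10.html through p140-10.html.

The hunk also leans on imports, constants, and the mkdir_path helper defined above line 19 and therefore outside this diff. Below is a minimal sketch of that assumed preamble, just enough to make the new functions runnable. The URL and BACKUP_DIR values, and everything in mkdir_path except its last two lines (visible as context at the top of the hunk), are illustrative assumptions, not part of the commit.

# Assumed preamble for web_scrap.py; not shown in this hunk.
import os

import requests
from bs4 import BeautifulSoup
from urllib.parse import urlparse

URL = "www.example.com"    # hostname of the site to mirror; placeholder value
BACKUP_DIR = "backup"      # root of the local backup tree; placeholder value

def mkdir_path(path_dir):
    # Plausible shape of the helper: create each missing component of
    # path_dir one level at a time (relative paths only in this sketch).
    # Only the last two lines below appear in the hunk above.
    makedir = []
    for directory in path_dir.split("/"):
        if not directory:
            continue
        makedir.append(directory)
        repath = "/".join(makedir)
        if not os.path.exists(repath):
            os.mkdir(repath)

With a preamble like this in place, running python web_scrap.py executes the new __main__ guard: downloadPage(URL) forces an https scheme on the starting URL, getUrlPage collects the menu links plus the paginated archive and article URLs, and the final loop recreates each page's directory path under BACKUP_DIR.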