#!/usr/bin/python3
from bs4 import BeautifulSoup
from urllib.parse import urlparse
import requests, os, sys, json
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

class WPExport:
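    """Export a canalblog-hosted blog: crawl its page and article URLs,
    then mirror the HTML, CSS, JS and images into a local backup directory."""
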
    def __init__(self, name = "Thread-0", url = "", logger = None, parser = "html.parser", directory = "backup", ssl_canalblog=True, tmp="/tmp/import_export_canablog"):
        self._url = url
        self._logger = logger
        self._parser = parser
        self._dir = directory
        self._name = name
        # Fall back to plain HTTP when SSL is disabled for canalblog
        self._protocol = "https" if ssl_canalblog else "http"

        self._request = requests.Session()

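        # Retry transient failures (429 and 5xx) with exponential backoff
        # before giving up, so long crawls survive rate limiting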
        retries = Retry(total=10,
                status_forcelist=[429, 500, 502, 503, 504], backoff_factor=2)

        self._request.mount('{0}://'.format(self._protocol), HTTPAdapter(max_retries=retries))
        self._tmp = tmp

    # Destructor: log completion (guard against a missing logger)
    def __del__(self):
        if self._logger is not None:
            self._logger.info("{0} : Export finished for {1}".format(self._name, self._url))

    # Public methods


    # Set thread name from an index (e.g. "0" -> "Thread-1")

    def setName(self, name):
        self._name = "Thread-{0}".format(int(name) + 1)

    # Set URL

    def setUrl(self, url):
        self._url = url

    # Download the site's JS files into <dir>/<url path>/dists/js

    def downloadJs(self):
        script = self._getScriptCss(True, False)
        o = urlparse(self._url)
        self._downloadPage(script, "{0}/{1}/{2}".format(self._dir, o.path, "dists/js"))

    # Download the site's CSS files into <dir>/<url path>/dists/css

    def downloadCss(self):
        css = self._getScriptCss(False, True)
        o = urlparse(self._url)
        self._downloadPage(css, "{0}/{1}/{2}".format(self._dir, o.path, "dists/css"))

    # Download HTML pages listed in this thread's JSON crawl file

    def downloadHTML(self, first, second):
        try:
            with open("{0}/{1}.json".format(self._tmp, self._name)) as file:
                webpage = json.load(file)
                self._downloadPage(webpage[first][second], self._dir)
        except Exception as ex:
            self._logger.error("{0} : Failed to read JSON file from tmp : {1}".format(self._name, ex))

    # Download images referenced by the pages listed in this thread's JSON crawl file

    def downloadImg(self, first, second):
        try:
            with open("{0}/{1}.json".format(self._tmp, self._name)) as file:
                webpage = json.load(file)
                page_src = self._getImg(webpage[first][second])
                o = urlparse(self._url)
                self._downloadPage(page_src, "{0}/{1}/{2}".format(self._dir, o.path, "img"))
        except Exception as ex:
            self._logger.error("{0} : Failed to read JSON file from tmp : {1}".format(self._name, ex))


    # Get all page and article URLs, splitting listing pages across threads
    def getUrlPage(self, index_thread, max_thread):
        page_url = []
        try:
            page = self._request.get(self._url)

            if page.status_code == 200:
                soup = BeautifulSoup(page.text, self._parser)
                ul = soup.find_all("ul", id="listsmooth")
                for anchor in ul[0].find_all("a"):
                    href = anchor.get('href', '/')
                    if href != "#":
                        page_url.append(href)
            else:
                self._logger.error("{0} : Failed to get URL due to status code : {1}".format(self._name, page.status_code))
                self._logger.debug("{0} : {1}".format(self._name, page.content))
        except requests.exceptions.ConnectionError as err:
            self._logger.error("{0} : Connection error : {1}".format(self._name, err))
            sys.exit(1)
        except Exception as err:
            self._logger.error("{0} : Exception error : {1}".format(self._name, err))

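        # Crawl results, keyed by section: "principal" is the blog root,
        # "publications" covers the category links from the menu; each section
        # holds the listing pages and the article URLs found on them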
        webpage = {"principal": {"page":[], "article":[]}, "publications": {"page":[], "article":[]}}
        for i in page_url:
            section = "publications"
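            # Normalize the menu link to the configured protocol; the ":///"
            # cleanup guards against hrefs that parse without a netloc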
            o = urlparse(i)
            o = o._replace(scheme=self._protocol)
            i = o.geturl().replace(":///", "://")
            if i == "{0}/".format(self._url):
                section = "principal"
            try:
                page = self._request.get(i)
            
                if page.status_code == 200:
                    self._logger.info("{0} : page : {1}".format(self._name, i))
                    if i not in webpage[section]["page"]:
                        webpage[section]["page"].append(i)
                    soup = BeautifulSoup(page.text, self._parser)
                    class_div = soup.find_all("div", class_="pagingfirstline")
                    if len(class_div) > 0:
                        pagingfirstline = class_div[0].find_all("a")
                        if len(pagingfirstline) > 1:
                            lastpage = pagingfirstline[-1].get("href", "/")
                            self._logger.debug("{0} : Last page {1}".format(self._name, lastpage))

                            # The last-page href ends in pN-10.html; with 10 posts
                            # per listing page, N / 10 gives the number of pages
                            element_lastpage = lastpage.split("/")[-1]
                            number_page = element_lastpage.split("-")[0].split("p")[1]
                            number_lastpage = int(number_page) / 10

                            # Share the listing pages between threads (ceiling division)
                            setPageDivided = int(number_lastpage) // max_thread
                            if int(number_lastpage) % max_thread > 0:
                                setPageDivided = setPageDivided + 1
                            setPagePart = setPageDivided * (index_thread + 1)
                            firstPagePart = setPagePart - setPageDivided

                            self._logger.debug("{0} : Total page : {1}".format(self._name,int(number_lastpage)))
                            self._logger.debug("{0} : First range : {1}".format(self._name, int(firstPagePart)))
                            self._logger.debug("{0} : Last range : {1}".format(self._name, int(setPagePart)))

                            for j in range(int(firstPagePart),int(setPagePart)+1):
                                paging = j * 10
                                categorie = urlparse(i).path.split("/")
                                url_paging = "{0}/archives/p{1}-10.html".format(self._url, paging)
                                if len(categorie) > 2:
                                    url_paging = "{0}/archives/{1}/p{2}-10.html".format(self._url, categorie[2], paging)
                                self._logger.info("{0} : {1}".format(self._name, url_paging))
                                if url_paging not in webpage[section]["page"]:
                                    webpage[section]["page"].append(url_paging)
                                page = self._request.get(url_paging)
                                if page.status_code == 200:
                                    soup = BeautifulSoup(page.text, self._parser)
                                    h2 = soup.find_all("h2")
                                    self._logger.debug("{0} : {1} H2 : {2}".format(self._name, url_paging, h2))
                                    for title in h2:
                                        self._logger.debug("{0} : {1} a : {2}".format(self._name, url_paging, title.find_all("a")))
                                        href = title.find_all("a")[0].get("href", "/")
                                        if href not in webpage[section]["article"]:
                                            try:
                                                o = urlparse(href)
                                                o = o._replace(scheme="https").geturl()
                                                webpage[section]["article"].append(o)
                                            except Exception as err:
                                                self._logger.error("{0} : parsing error : {1}".format(self._name, err))
                                                sys.exit(1)
                else:
                    self._logger.error("{0} : Failed to get page due to status code : {1}".format(self._name, page.status_code))
                    self._logger.debug("{0} : {1}".format(self._name, page.content))
            except requests.exceptions.ConnectionError as err:
                self._logger.error("{0} : Connection error : {1}".format(self._name, err))
                sys.exit(1)
            except Exception as err:
                self._logger.error("{0} : Exception error : {1}".format(self._name, err))
                sys.exit(1)
        try:
            # Hand the crawl result to the download* methods via a per-thread JSON file
            with open("{0}/{1}.json".format(self._tmp, self._name), "wt") as file:
                file.write(json.dumps(webpage))
        except Exception as ex:
            self._logger.error("{0} : Error for writing webpage : {1}".format(self._name, ex))


    # Private methods

    # Create path (like os.makedirs, but logs each directory it creates)
    def _mkdirPath(self, path_dir):
        if not os.path.exists(path_dir):
            makedir = []
            path_split = path_dir.split("/")
            for i in path_split:
                makedir.append(i)
                repath = "/".join(makedir)
                if not os.path.exists(repath):
                    self._logger.debug("{0} : Directory created : {1}".format(self._name, repath))
                    try:
                        if len(repath) > 0:
                            os.mkdir(repath)
                    except Exception as err:
                        self._logger.error("Directory error : {0}".format(err))
                        self._logger.debug("Directory error : {0} {1} {2} {3} {4}".format(err, path_dir, repath, path_split, makedir))
                        sys.exit(1)


    # Get Css and JS
    def _getScriptCss(self, js, css):
        page_url = []
        try:
            page = self._request.get(self._url)

            if page.status_code == 200:
                soup = BeautifulSoup(page.text, self._parser)
                if js is True:
                    script = soup.find_all("script")
                    for anchor in script:
                        src = anchor.get("src", "/")
                        if src != "/":
                            try:
                                u = urlparse(self._url)
                                o = urlparse(src)
                                if o.netloc == "":
                                    o = o._replace(netloc=u.netloc)
                                    o = o._replace(scheme=u.scheme)
                                page_url.append(o.geturl())
                            except Exception as err:
                                self._logger.error("parsing error : {0}".format(err))
                                sys.exit(1)
                            
                if css is True:
                    link = soup.find_all("link")
                    for anchor in link:
                        rel = anchor.get("rel", [])
                        if len(rel) > 0 and rel[0] == "stylesheet":
                            href = anchor.get("href", "/")
                            if href != "/":
                                try:
                                    u = urlparse(self._url)
                                    o = urlparse(href)
                                    if o.netloc == "":
                                        o = o._replace(netloc=u.netloc)
                                        o = o._replace(scheme=u.scheme)
                                    page_url.append(o.geturl())
                                except Exception as err:
                                    self._logger.error("parsing error : {0}".format(err))
                                    sys.exit(1)

            else:
                self._logger.error("JS or CSS not fetched due to status code : {0}".format(page.status_code))
                self._logger.debug(page.content)
        except requests.exceptions.ConnectionError as err:
            self._logger.error("Connection error : {0}".format(err))
            sys.exit(1)
        except Exception as err:
            self._logger.error("Exception error : {0}".format(err))

        return page_url

    # Get image

    def _getImg(self, webpage):
        page_img = []
        for i in webpage:
            try:
                page = self._request.get(i)
            
                if page.status_code == 200:
                    soup = BeautifulSoup(page.text, self._parser)
                    img = soup.find_all("img")
                    self._logger.info("{0} : image from page: {1} : ".format(self._name,i))
                    for anchor in img:
                        src = anchor.get("src", "/")
                        if src != "/":
                            if src not in page_img:
                                self._logger.info("{0} : image: {1} : ".format(self._name, src))
                                page_img.append(src)
                else:
                    self._logger.error("{0} : Image not fetched due to status code : {1}".format(self._name, page.status_code))
                    self._logger.debug("{0} : {1}".format(self._name, page.content))
            except requests.exceptions.ConnectionError as err:
                self._logger.error("{0} : Connection error : {1}".format(self._name, err))
                sys.exit(1)
            except Exception as err:
                self._logger.error("{0} : Exception error : {1}".format(self._name, err))

        return page_img


    # Download page
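    # Mirror each URL to backup_dir/<netloc>/<path>; URLs without a file
    # component are saved as index.html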
    def _downloadPage(self, webpage, backup_dir):

        for i, url in enumerate(webpage):
            try:
                o = urlparse(url)

                path_web = o.path.split("/")
                filePageWeb = path_web.pop()
                dir_page_web = "/".join(path_web)
                self._mkdirPath("{0}/{1}/{2}".format(backup_dir, o.netloc, dir_page_web))
                try:
                    r = self._request.get(url)

                    if r.status_code == 200:
                        fileDownload = "{0}/{1}/index.html".format(backup_dir, o.netloc)
                        if len(dir_page_web) > 0 and len(filePageWeb) > 0:
                            fileDownload = "{0}/{1}{2}/{3}".format(backup_dir, o.netloc, dir_page_web, filePageWeb)
                        self._logger.info("{0} : {1}/{2} : {3}".format(self._name, i + 1, len(webpage), fileDownload))
                        try:
                            with open(fileDownload, "wb") as f:
                                f.write(r.content)
                        except Exception as err:
                            self._logger.error("file error : {0}".format(err))
                            sys.exit(1)
                    else:
                        self._logger.error("Not downloaded due to status code : {0}".format(r.status_code))
                        self._logger.debug(r.content)
                except requests.exceptions.ConnectionError as err:
                    self._logger.error("{0} : Connection error : {1}".format(self._name, err))
                    sys.exit(1)
                except Exception as err:
                    self._logger.error("{0} : Exception error : {1}".format(self._name, err))
            except Exception as err:
                self._logger.error("parsing error : {0}".format(err))
                sys.exit(1)
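

# Example usage (a minimal sketch, not part of the original module). Assumes a
# configured logger and a reachable blog URL; "https://example.canalblog.com"
# is a placeholder. getUrlPage() must run first, because the download* methods
# read the JSON crawl file it writes under tmp.
#
#   import logging
#   logging.basicConfig(level=logging.INFO)
#   exporter = WPExport(url="https://example.canalblog.com",
#                       logger=logging.getLogger(__name__))
#   exporter.getUrlPage(index_thread=0, max_thread=1)
#   exporter.downloadJs()
#   exporter.downloadCss()
#   exporter.downloadHTML("principal", "page")
#   exporter.downloadImg("principal", "article")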