path: root/mwctools.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# Copyright: (2013-2017) Michael Till Beck <Debianguru@gmx.de>
# License: GPL-2.0+


import urllib.request
import urllib.error
import urllib.parse
import subprocess

from lxml import etree
from cssselect import GenericTranslator
import re


# Attributes in HTML files storing URI values. These values are automatically translated to absolute URIs.
uriAttributes = [['//img[@src]', 'src'], ['//a[@href]', 'href']]

maxTitleLength = 150


class Parser:
    # input: [Content], output: [Content]
    def performAction(self, contentList):
        pass


class Receiver(Parser):
    def __init__(self, uri):
        self.uri = uri


class Content:
    def __init__(self, uri, encoding, title, content, contenttype):
        self.uri = uri
        self.encoding = encoding
        self.title = title
        self.content = content
        self.contenttype = contenttype
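
# Note (illustrative, not part of the original module): receivers and parsers share
# the performAction() interface: a Receiver produces the initial [Content] list and
# each Parser transforms it, so instances can be chained into a processing pipeline.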


# returns a short subject line
def getSubject(textContent):
    global maxTitleLength
    
    if textContent is None or len(textContent.strip()) == 0:
        return 'Website has been updated'
    textContent = re.sub(r'\s+', ' ', textContent).strip()
    return (textContent[:maxTitleLength] + ' [..]') if len(textContent) > maxTitleLength else textContent
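
# Illustrative examples (not part of the original module):
#   getSubject('  Breaking\nnews  ')  ->  'Breaking news'
#   getSubject(None)                  ->  'Website has been updated'
# Longer titles are cut off at maxTitleLength characters and suffixed with ' [..]'.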


# translates all relative URIs found in trees to absolute URIs
def toAbsoluteURIs(trees, baseuri):
    global uriAttributes

    for tree in trees:
        if isinstance(tree, str):
            continue
        for uriAttribute in uriAttributes:
            tags = tree.xpath(uriAttribute[0])
            for tag in tags:
                if tag.attrib.get(uriAttribute[1]) is not None:
                    if urllib.parse.urlparse(tag.attrib[uriAttribute[1]]).scheme == '':
                        tag.attrib[uriAttribute[1]] = urllib.parse.urljoin(baseuri, tag.attrib[uriAttribute[1]])


class URLReceiver(Receiver):
    def __init__(self, uri, contenttype='html', encoding='utf-8', userAgent=None, accept=None):
        super().__init__(uri)
        self.contenttype = contenttype
        self.encoding = encoding
        self.userAgent = userAgent
        self.accept = accept

    # input: [Content], output: [Content]
    def performAction(self, contentList=None):
        if contentList is None:
            contentList = []
        
        # open website
        req = urllib.request.Request(self.uri)
        if self.userAgent is not None:
            req.add_header('User-Agent', self.userAgent)
        if self.accept is not None:
            req.add_header('Accept', self.accept)

        with urllib.request.urlopen(req) as thefile:
            filecontent = thefile.read().decode(self.encoding, errors='ignore')
            contentList.append(Content(uri=self.uri, encoding=self.encoding, title=None, content=filecontent, contenttype=self.contenttype))

        return contentList
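
# Usage sketch (illustrative only, not part of the original module; the URL and
# header values are placeholders):
def _example_fetch():
    receiver = URLReceiver(uri='https://example.org/news.html',
                           userAgent='mwc', accept='text/html')
    # fetches the page and returns a one-element [Content] list
    return receiver.performAction()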


class CommandReceiver(Receiver):
    def __init__(self, command, contenttype='text', encoding='utf-8'):
        super().__init__(command)
        self.encoding = encoding
        self.command = command
        self.contenttype = contenttype

    # input: [Content], output: [Content]
    def performAction(self, contentList=None):
        if contentList is None:
            contentList = []

        # run command and retrieve output
        process = subprocess.Popen(self.command, stdout=subprocess.PIPE, shell=True, close_fds=True)
        thefile = process.stdout
        result = thefile.read().decode(self.encoding, errors='ignore')
        thefile.close()

        if process.wait() != 0:
            raise Exception("process terminated with an error")

        contentList.append(Content(uri=None, encoding=self.encoding, title=None, content=result, contenttype=self.contenttype))
        return contentList
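
# Usage sketch (illustrative only, not part of the original module; the command
# is a placeholder and is executed through the shell):
def _example_run_command():
    receiver = CommandReceiver(command='date -u', contenttype='text')
    # returns [Content] holding the command's stdout; raises on a non-zero exit code
    return receiver.performAction()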


class XPathParser(Parser):
    def __init__(self, contentxpath, titlexpath=None):
        self.contentxpath = contentxpath
        self.titlexpath = titlexpath

    # input: [Content], output: [Content]
    def performAction(self, contentList):
        result = []
        for content in contentList:
            result.extend(self.parseOneObject(content))
        return result

    # input: Content, output: [Content]
    def parseOneObject(self, content):
        baseuri = content.uri
        if content.contenttype == 'html':
            parser = etree.HTMLParser(encoding=content.encoding)
        else:
            parser = etree.XMLParser(recover=True, encoding=content.encoding)

        tree = etree.fromstring(content.content, parser=parser)

        # xpath
        contentresult = [] if self.contentxpath is None else tree.xpath(self.contentxpath)
        titleresult = [] if self.titlexpath is None else tree.xpath(self.titlexpath)

        # translate relative URIs to absolute URIs
        if content.contenttype == 'html':
            basetaglist = tree.xpath('/html/head/base')
            if len(basetaglist) != 0:
                baseuri = basetaglist[0].attrib['href']
            if len(contentresult) != 0:
                toAbsoluteURIs(contentresult, baseuri)
            if len(titleresult) != 0:
                toAbsoluteURIs(titleresult, baseuri)

        if self.contentxpath and len(contentresult) == 0:
            raise Exception('WARNING: content selector became invalid!')
        if self.titlexpath and len(titleresult) == 0:
            raise Exception('WARNING: title selector became invalid!')

        contents = []
        titles = []
        if isinstance(contentresult, str):
            contents = [contentresult]
        else:
            if len(contentresult) == 0:
                contentresult = titleresult
            contents = [etree.tostring(s, encoding=content.encoding, pretty_print=True).decode(content.encoding, errors='ignore') for s in contentresult]

        if isinstance(titleresult, str):
            titles = [getSubject(titleresult)]*len(contents)
        else:
            if len(titleresult) == 0 or len(titleresult) != len(contentresult):
                titleresult = contentresult
            titles = [getSubject(etree.tostring(s, method='text', encoding=content.encoding).decode(content.encoding, errors='ignore')) for s in titleresult]

        result = []
        for i in range(0, len(contents)):
            result.append(Content(uri=content.uri, encoding=content.encoding, title=titles[i], content=contents[i], contenttype=content.contenttype))

        return result
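
# Usage sketch (illustrative only, not part of the original module; the URL and
# XPath expressions are placeholders):
def _example_xpath_pipeline():
    receiver = URLReceiver(uri='https://example.org/news.html')
    parser = XPathParser(contentxpath='//div[@id="content"]', titlexpath='//h1')
    # each matched content node becomes its own Content object with a short subject line
    return parser.performAction(receiver.performAction())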


class CSSParser(Parser):
    def __init__(self, contentcss, titlecss=None):
        contentxpath = GenericTranslator().css_to_xpath(contentcss)
        titlexpath = None
        if titlecss is not None:
            titlexpath = GenericTranslator().css_to_xpath(titlecss)

        self.xpathparser = XPathParser(contentxpath=contentxpath, titlexpath=titlexpath)

    # input: [Content], output: [Content]
    def performAction(self, contentList):
        return self.xpathparser.performAction(contentList)
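
# Usage sketch (illustrative only, not part of the original module; the CSS selectors
# are placeholders and are translated to XPath internally):
def _example_css_pipeline():
    receiver = URLReceiver(uri='https://example.org/news.html')
    parser = CSSParser(contentcss='div#content', titlecss='h1')
    return parser.performAction(receiver.performAction())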


class RegExParser(Parser):
    def __init__(self, contentregex, titleregex=None):
        self.contentregex = contentregex
        self.titleregex = titleregex

    # input: [Content], output: [Content]
    def performAction(self, contentList):
        result = []
        for content in contentList:
            result.extend(self.parseOneObject(content))
        return result

    # input: Content, output: [Content]
    def parseOneObject(self, content):
        contents = []
        titles = []
        if self.contentregex is not None:
            for c in re.findall(self.contentregex, content.content, re.M):
                if len(c.strip()) != 0:
                    contents.append(c)
        if self.titleregex is not None:
            # apply the title regex to the fetched content as well (receivers set title to None)
            for c in re.findall(self.titleregex, content.content, re.M):
                if len(c.strip()) != 0:
                    titles.append(c)

        if self.contentregex is not None and len(contents) == 0:
            raise Exception('WARNING: content regex became invalid!')
        elif self.titleregex is not None and len(titles) == 0:
            raise Exception('WARNING: title regex became invalid!')
        else:
            if len(contents) == 0:
                contents = titles
            if len(titles) == 0 or len(titles) != len(contents):
                titles = [getSubject(c) for c in contents]

        result = []
        for i in range(0, len(contents)):
            result.append(Content(uri=content.uri, encoding=content.encoding, title=titles[i], content=contents[i], contenttype=content.contenttype))

        return result
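
# Usage sketch (illustrative only, not part of the original module; command and
# regular expression are placeholders):
def _example_regex_pipeline():
    receiver = CommandReceiver(command='ls -l /etc', contenttype='text')
    parser = RegExParser(contentregex='^d.*$')
    # every non-empty match becomes its own Content object
    return parser.performAction(receiver.performAction())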