1"""
2scrapy.linkextractors
3
4This package contains a collection of Link Extractors.
5
6For more info see docs/topics/link-extractors.rst
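
Example (a minimal sketch; the patterns and the ``response`` object are
illustrative, not part of this module)::

    from scrapy.linkextractors import LinkExtractor

    extractor = LinkExtractor(allow=r'/articles/', deny=r'/drafts/')
    links = extractor.extract_links(response)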
7"""
import re
from urllib.parse import urlparse
from warnings import warn

from parsel.csstranslator import HTMLTranslator
from w3lib.url import canonicalize_url

from scrapy.utils.deprecate import ScrapyDeprecationWarning
from scrapy.utils.misc import arg_to_iter
from scrapy.utils.url import (
    url_is_from_any_domain, url_has_any_extension,
)


# common file extensions that are not followed if they occur in links
IGNORED_EXTENSIONS = [
    # archives
    '7z', '7zip', 'bz2', 'rar', 'tar', 'tar.gz', 'xz', 'zip',

    # images
    'mng', 'pct', 'bmp', 'gif', 'jpg', 'jpeg', 'png', 'pst', 'psp', 'tif',
    'tiff', 'ai', 'drw', 'dxf', 'eps', 'ps', 'svg', 'cdr', 'ico',

    # audio
    'mp3', 'wma', 'ogg', 'wav', 'ra', 'aac', 'mid', 'au', 'aiff',

    # video
    '3gp', 'asf', 'asx', 'avi', 'mov', 'mp4', 'mpg', 'qt', 'rm', 'swf', 'wmv',
    'm4a', 'm4v', 'flv', 'webm',

    # office suites
    'xls', 'xlsx', 'ppt', 'pptx', 'pps', 'doc', 'docx', 'odt', 'ods', 'odg',
    'odp',

    # other
    'css', 'pdf', 'exe', 'bin', 'rss', 'dmg', 'iso', 'apk'
]
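
# Callers who want to skip additional extensions can extend this list rather
# than replace it, e.g. (illustrative):
#   LinkExtractor(deny_extensions=IGNORED_EXTENSIONS + ['php'])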


_re_type = type(re.compile("", 0))  # the type of a compiled regular expression


def _matches(url, regexs):
    return any(r.search(url) for r in regexs)


def _is_valid_url(url):
    return url.split('://', 1)[0] in {'http', 'https', 'file', 'ftp'}
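
# Quick sanity check of the scheme filter above (illustrative values):
#   _is_valid_url('http://example.com/a')  -> True
#   _is_valid_url('javascript:void(0);')   -> False  (no '://' separator)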


class FilteringLinkExtractor:
    """Apply allow/deny filtering on top of a wrapped link extractor.

    Deprecated as a public name: instantiate or subclass
    scrapy.linkextractors.LinkExtractor instead.
    """

    _csstranslator = HTMLTranslator()

    def __new__(cls, *args, **kwargs):
        from scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor
        if issubclass(cls, FilteringLinkExtractor) and not issubclass(cls, LxmlLinkExtractor):
            warn('scrapy.linkextractors.FilteringLinkExtractor is deprecated, '
                 'please use scrapy.linkextractors.LinkExtractor instead',
                 ScrapyDeprecationWarning, stacklevel=2)
        return super().__new__(cls)

    def __init__(self, link_extractor, allow, deny, allow_domains, deny_domains,
                 restrict_xpaths, canonicalize, deny_extensions, restrict_css, restrict_text):

        self.link_extractor = link_extractor

        # plain strings are compiled; pre-compiled patterns pass through as-is
        self.allow_res = [x if isinstance(x, _re_type) else re.compile(x)
                          for x in arg_to_iter(allow)]
        self.deny_res = [x if isinstance(x, _re_type) else re.compile(x)
                         for x in arg_to_iter(deny)]

        self.allow_domains = set(arg_to_iter(allow_domains))
        self.deny_domains = set(arg_to_iter(deny_domains))

        # CSS selectors are translated to XPath so both restrictions share one code path
        self.restrict_xpaths = tuple(arg_to_iter(restrict_xpaths))
        self.restrict_xpaths += tuple(map(self._csstranslator.css_to_xpath,
                                          arg_to_iter(restrict_css)))

        self.canonicalize = canonicalize
        if deny_extensions is None:
            deny_extensions = IGNORED_EXTENSIONS
        self.deny_extensions = {'.' + e for e in arg_to_iter(deny_extensions)}
        self.restrict_text = [x if isinstance(x, _re_type) else re.compile(x)
                              for x in arg_to_iter(restrict_text)]

    def _link_allowed(self, link):
        if not _is_valid_url(link.url):
            return False
        if self.allow_res and not _matches(link.url, self.allow_res):
            return False
        if self.deny_res and _matches(link.url, self.deny_res):
            return False
        parsed_url = urlparse(link.url)
        if self.allow_domains and not url_is_from_any_domain(parsed_url, self.allow_domains):
            return False
        if self.deny_domains and url_is_from_any_domain(parsed_url, self.deny_domains):
            return False
        if self.deny_extensions and url_has_any_extension(parsed_url, self.deny_extensions):
            return False
        if self.restrict_text and not _matches(link.text, self.restrict_text):
            return False
        return True
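
    # Note: the cheap regex-based checks above run before urlparse(), so links
    # filtered out by allow/deny never pay the URL-parsing cost.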

    def matches(self, url):
        """Return True if *url* passes this extractor's domain and
        allow/deny regex filters."""
        if self.allow_domains and not url_is_from_any_domain(url, self.allow_domains):
            return False
        if self.deny_domains and url_is_from_any_domain(url, self.deny_domains):
            return False

        allowed = (regex.search(url) for regex in self.allow_res) if self.allow_res else [True]
        denied = (regex.search(url) for regex in self.deny_res) if self.deny_res else []
        return any(allowed) and not any(denied)
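
    # e.g. (illustrative) with allow=r'/blog/' and deny=r'\.php$':
    #   matches('http://example.com/blog/post')      -> True
    #   matches('http://example.com/blog/index.php') -> False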

    def _process_links(self, links):
        links = [x for x in links if self._link_allowed(x)]
        if self.canonicalize:
            for link in links:
                link.url = canonicalize_url(link.url)
        links = self.link_extractor._process_links(links)
        return links

    def _extract_links(self, *args, **kwargs):
        return self.link_extractor._extract_links(*args, **kwargs)


# Top-level import, kept at the bottom to avoid a circular import
# (scrapy.linkextractors.lxmlhtml imports names from this package).
from scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor as LinkExtractor
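
# LinkExtractor is the public name; LxmlLinkExtractor is the concrete
# implementation behind it.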