
Searched refs:extract_text (Results 1 – 25 of 110) sorted by relevance

/dports/www/searx/searx-1.0.0/searx/engines/
rumble.py
10 from searx.utils import extract_text
55 url = base_url + extract_text(result_dom.xpath(url_xpath))
56 thumbnail = extract_text(result_dom.xpath(thumbnail_xpath))
57 title = extract_text(result_dom.xpath(title_xpath))
58 p_date = extract_text(result_dom.xpath(published_date))
61 earned = extract_text(result_dom.xpath(earned_xpath))
62 views = extract_text(result_dom.xpath(views_xpath))
63 rumbles = extract_text(result_dom.xpath(rumbles_xpath))
64 author = extract_text(result_dom.xpath(author_xpath))
65 length = extract_text(result_dom.xpath(length_xpath))
ebay.py
7 from searx.engines.xpath import extract_text
50 url = extract_text(result_dom.xpath(url_xpath))
51 title = extract_text(result_dom.xpath(title_xpath))
52 content = extract_text(result_dom.xpath(content_xpath))
53 price = extract_text(result_dom.xpath(price_xpath))
54 shipping = extract_text(result_dom.xpath(shipping_xpath))
55 source_country = extract_text(result_dom.xpath(source_country_xpath))
56 thumbnail = extract_text(result_dom.xpath(thumbnail_xpath))
xpath.py
5 from searx.utils import extract_text, extract_url, eval_xpath, eval_xpath_list
48 title = extract_text(eval_xpath_list(result, title_xpath, min_len=1))
49 content = extract_text(eval_xpath_list(result, content_xpath, min_len=1))
61 + extract_text(eval_xpath_list(result, cached_xpath, min_len=1))
72 map(extract_text, eval_xpath_list(dom, title_xpath)),
73 map(extract_text, eval_xpath_list(dom, content_xpath)),
74 map(extract_text, eval_xpath_list(dom, cached_xpath))
82 map(extract_text, eval_xpath_list(dom, title_xpath)),
83 map(extract_text, eval_xpath_list(dom, content_xpath))
90 results.append({'suggestion': extract_text(suggestion)})
kickass.py
9 from searx.utils import extract_text, get_torrent_size, convert_str_to_int
59 title = extract_text(link)
60 content = extract_text(result.xpath(content_xpath))
61 seed = extract_text(result.xpath('.//td[contains(@class, "green")]'))
62 leech = extract_text(result.xpath('.//td[contains(@class, "red")]'))
63 filesize_info = extract_text(result.xpath('.//td[contains(@class, "nobr")]'))
64 files = extract_text(result.xpath('.//td[contains(@class, "center")][2]'))
google_scholar.py
21 extract_text,
126 title = extract_text(eval_xpath(result, './h3[1]//a'))
133 content = extract_text(eval_xpath(result, './div[@class="gs_rs"]')) or ''
135 pub_info = extract_text(eval_xpath(result, './div[@class="gs_a"]'))
139 pub_type = extract_text(eval_xpath(result, './/span[@class="gs_ct1"]'))
152 results.append({'suggestion': extract_text(suggestion)})
155 results.append({'correction': extract_text(correction)})
google_videos.py
32 extract_text,
163 if extract_text(eval_xpath(result, g_section_with_header)):
167 title = extract_text(eval_xpath_getindex(result, title_xpath, 0))
180 length = extract_text(eval_xpath(c_node, './/div[1]//a/div[3]'))
181 content = extract_text(eval_xpath(c_node, './/div[2]/span'))
182 pub_info = extract_text(eval_xpath(c_node, './/div[2]/div'))
197 results.append({'suggestion': extract_text(suggestion)})
200 results.append({'correction': extract_text(correction)})
1337x.py
8 from searx.utils import extract_text, get_torrent_size, eval_xpath, eval_xpath_list, eval_xpath_get…
39 title = extract_text(eval_xpath(result, './td[contains(@class, "name")]/a[2]'))
40 seed = extract_text(eval_xpath(result, './/td[contains(@class, "seeds")]'))
41 leech = extract_text(eval_xpath(result, './/td[contains(@class, "leeches")]'))
42 filesize_info = extract_text(eval_xpath(result, './/td[contains(@class, "size")]/text()'))
google_news.py
31 extract_text,
162 title = extract_text(eval_xpath(result, './article/h3[1]'))
165 content = extract_text(eval_xpath(result, './article/div[1]'))
169 pub_date = extract_text(eval_xpath(result, './article/div[2]//time'))
170 pub_origin = extract_text(eval_xpath(result, './article/div[2]//a'))
187 img_src = extract_text(result.xpath('preceding-sibling::a/figure/img/@src'))
ina.py
11 from searx.utils import extract_text
62 title = unescape(extract_text(result.xpath(title_xpath)))
64 thumbnail = extract_text(result.xpath(thumbnail_xpath)[0])
69 d = extract_text(result.xpath(publishedDate_xpath)[0])
74 content = extract_text(result.xpath(content_xpath))
ahmia.py
8 from searx.engines.xpath import extract_url, extract_text, eval_xpath_list, eval_xpath
65 title = extract_text(eval_xpath(result, title_xpath))
66 content = extract_text(eval_xpath(result, content_xpath))
75 results.append({'correction': extract_text(correction)})
81 results.append({'number_of_results': int(extract_text(number_of_results))})
tokyotoshokan.py
10 from searx.utils import extract_text, get_torrent_size, int_or_zero
62 'title': extract_text(links[-1])
74 desc = extract_text(info_row.xpath('./td[@class="desc-bot"]')[0])
96 params['seed'] = int_or_zero(extract_text(stats[0]))
97 params['leech'] = int_or_zero(extract_text(stats[1]))
fdroid.py
8 from searx.utils import extract_text
44 app_title = extract_text(app.xpath('./div/h4[@class="package-name"]/text()'))
45 app_content = extract_text(app.xpath('./div/div/span[@class="package-summary"]')).strip() \
46 + ' - ' + extract_text(app.xpath('./div/div/span[@class="package-license"]')).strip()
yahoo_news.py
20 extract_text,
81 title = extract_text(result.xpath('.//h4/a'))
82 content = extract_text(result.xpath('.//p'))
92 pub_date = extract_text(result.xpath('.//span[contains(@class,"s-time")]'))
109 results.append({'suggestion': extract_text(suggestion)})
doku.py
8 from searx.utils import extract_text, eval_xpath
60 title = extract_text(eval_xpath(r, './/a[@class="wikilink1"]/@title'))
72 title = extract_text(eval_xpath(r, './/a[@class="wikilink1"]/@title'))
74 content = extract_text(eval_xpath(r, '.'))
not_evil.py
8 from searx.engines.xpath import extract_text
57 url = extract_text(result.xpath(url_xpath)[0])
58 title = extract_text(result.xpath(title_xpath)[0])
59 content = extract_text(result.xpath(content_xpath))
digbt.py
8 from searx.utils import extract_text, get_torrent_size
45 title = extract_text(result.xpath('.//a[@title]'))
46 content = extract_text(result.xpath('.//div[@class="files"]'))
47 files_data = extract_text(result.xpath('.//div[@class="tail"]')).split()
framalibre.py
9 from searx.utils import extract_text
56 title = escape(extract_text(link))
60 thumbnail = extract_text(thumbnail_tags[0])
63 content = escape(extract_text(result.xpath(content_xpath)))
bing.py
10 from searx.utils import eval_xpath, extract_text, match_language
69 title = extract_text(link)
70 content = extract_text(eval_xpath(result, './/p'))
81 title = extract_text(link)
82 content = extract_text(eval_xpath(result, './/p'))
google.py
16 from searx.utils import match_language, extract_text, eval_xpath, eval_xpath_list, eval_xpath_getin…
262 if extract_text(eval_xpath(result, g_section_with_header)):
272 title = extract_text(title_tag)
276 …content = extract_text(eval_xpath_getindex(result, content_xpath, 0, default=None), allow_none=Tru…
293 results.append({'suggestion': extract_text(suggestion)})
296 results.append({'correction': extract_text(correction)})
duckduckgo.py
8 from searx.utils import extract_text, match_language, eval_xpath, dict_subset
109 title = extract_text(eval_xpath(r, title_xpath))
110 content = extract_text(eval_xpath(r, content_xpath))
120 results.append({'correction': extract_text(correction)})
etools.py
8 from searx.utils import extract_text, eval_xpath
49 title = extract_text(eval_xpath(result, './a//text()'))
50 content = extract_text(eval_xpath(result, './/div[@class="text"]//text()'))
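The engine modules above all follow the same response-parsing pattern: parse the HTML response with lxml, select result nodes with XPath, and flatten each node set into a plain string with searx.utils.extract_text. A minimal sketch of that pattern, assuming the searx-1.0.0 utils API shown in the imports above; the XPath selectors here are placeholders, not taken from any real engine:

from lxml import html
from searx.utils import extract_text, eval_xpath, eval_xpath_list

# Placeholder selectors; each engine defines its own *_xpath expressions.
results_xpath = '//div[@class="result"]'
url_xpath = './/a/@href'
title_xpath = './/h3'
content_xpath = './/p'

def response(resp):
    results = []
    dom = html.fromstring(resp.text)
    for result in eval_xpath_list(dom, results_xpath):
        results.append({
            # extract_text accepts an element, a node-set, or an attribute list
            # and returns a whitespace-normalized string.
            'url': extract_text(eval_xpath(result, url_xpath)),
            'title': extract_text(eval_xpath(result, title_xpath)),
            'content': extract_text(eval_xpath(result, content_xpath)),
        })
    return results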
/dports/lang/ruby26/ruby-2.6.9/lib/rdoc/generator/pot/
message_extractor.rb
29 extract_text(klass.comment_location, klass.full_name)
32 extract_text(section.title ,"#{klass.full_name}: section title")
34 extract_text(comment, "#{klass.full_name}: #{section.title}")
39 extract_text(constant.comment, constant.full_name)
43 extract_text(attribute.comment, attribute.full_name)
47 extract_text(method.comment, method.full_name)
51 def extract_text text, comment, location = nil    [method in MessageExtractor]
/dports/science/siconos/siconos-4.4.0/docs/gendoctools/
sicodoxy2swig.py
149 descr = self.extract_text(brief).strip()
164 ename = self.extract_text(self.get_specific_subnodes(n, 'name'))
175 ename = self.extract_text(self.get_specific_subnodes(node, 'name'))
179 edescr = self.extract_text(
324 sig_prefix = self.extract_text(ns_node[0]) + '::'
328 sig_prefix = self.extract_text(cn_node[0]) + '::'
381 name = self.extract_text(self.get_specific_subnodes(node, 'name'))
383 argsstring = self.extract_text(
389 declname = self.extract_text(
393 defval = self.extract_text(
[all …]
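In sicodoxy2swig.py, extract_text is a method of the converter class and works on Doxygen XML nodes rather than on lxml results. A hypothetical sketch of that kind of helper, recursively collecting text from xml.dom.minidom nodes; it is an illustration only, not the actual sicodoxy2swig.py implementation:

from xml.dom import minidom

def extract_text(nodes):
    # Concatenate the text content of a node, or of a list of nodes.
    if nodes is None:
        return ''
    if not isinstance(nodes, (list, tuple)):
        nodes = [nodes]
    parts = []
    for node in nodes:
        if node.nodeType == node.TEXT_NODE:
            parts.append(node.data)
        else:
            parts.extend(extract_text(child) for child in node.childNodes)
    return ''.join(parts)

doc = minidom.parseString('<memberdef><name>solve</name></memberdef>')
print(extract_text(doc.getElementsByTagName('name')))  # prints: solve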
/dports/www/searx/searx-1.0.0/tests/unit/
test_utils.py
59 self.assertEqual(utils.extract_text(dom), 'Test text')
60 self.assertEqual(utils.extract_text(dom.xpath('//span')), 'Test text')
61 self.assertEqual(utils.extract_text(dom.xpath('//span/text()')), 'Test text')
62 self.assertEqual(utils.extract_text(dom.xpath('count(//span)')), '3.0')
63 self.assertEqual(utils.extract_text(dom.xpath('boolean(//span)')), 'True')
64 self.assertEqual(utils.extract_text(dom.xpath('//img/@src')), 'test.jpg')
65 self.assertEqual(utils.extract_text(dom.xpath('//unexistingtag')), '')
66 self.assertEqual(utils.extract_text(None, allow_none=True), None)
68 utils.extract_text(None)
70 utils.extract_text({})
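The assertions above pin down extract_text's contract for the main lxml result types: a single element, an element node-set, a text()/attribute node-set, the string form of count() and boolean() results, an empty match, and None with allow_none=True. A sketch of a fixture consistent with those expectations; the real HTML fixture lives in test_utils.py and is assumed here:

from lxml import html
from searx import utils

dom = html.fromstring(
    '<a href="#">'
    '<span><span><img src="test.jpg"/></span></span>'
    '<span>Test text</span>'
    '</a>'
)

utils.extract_text(dom)                          # 'Test text'  (whole element)
utils.extract_text(dom.xpath('//span'))          # 'Test text'  (node-set)
utils.extract_text(dom.xpath('count(//span)'))   # '3.0'        (number coerced to str)
utils.extract_text(dom.xpath('//img/@src'))      # 'test.jpg'   (attribute node-set)
utils.extract_text(dom.xpath('//missing'))       # ''           (empty match)
utils.extract_text(None, allow_none=True)        # None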
/dports/net-im/biboumi/biboumi-9.0/tests/end_to_end/scenarios/
channel_list_with_rsm.py
37 …after = save_value("first", lambda stanza: extract_text("/iq/disco_items:query/rsm:set/rsm:first",…
49 …after = save_value("second", lambda stanza: extract_text("/iq/disco_items:query/rsm:set/rsm:first"…
59 …after = save_value("third", lambda stanza: extract_text("/iq/disco_items:query/rsm:set/rsm:first",…
