#!/usr/local/bin/python3.8

__license__ = 'GPL v3'
__copyright__ = 'Copyright 2010 Starson17'
'''
www.arcamax.com
'''

import os

from calibre.ptempfile import PersistentTemporaryDirectory
from calibre.web.feeds.news import BasicNewsRecipe


class Arcamax(BasicNewsRecipe):
    '''Fetch a week of family-friendly comic strips from arcamax.com.

    For each enabled strip, parse_index() walks backwards through the
    site's "prev" links, saving every strip page into a persistent
    temporary directory and handing calibre file:// URLs, so the panels
    are fetched exactly once even with simultaneous_downloads == 1.
    '''
    title = 'Arcamax'
    __author__ = 'Kovid Goyal'
    description = u'Family Friendly Comics - Customize for more days/comics: Defaults to 7 days, 25 comics - 20 general, 5 editorial.'
    category = 'news, comics'
    language = 'en'
    use_embedded_content = False
    no_stylesheets = True
    remove_javascript = True
    simultaneous_downloads = 1
    cover_url = 'https://www.arcamax.com/images/pub/amuse/leftcol/zits.jpg'

    # ###### USER PREFERENCES - SET COMICS AND NUMBER OF COMICS TO RETRIEVE ##
    num_comics_to_get = 7
    # CHOOSE COMIC STRIPS BELOW - REMOVE COMMENT '# ' FROM IN FRONT OF DESIRED
    # STRIPS

    conversion_options = {'linearize_tables': True, 'comment': description, 'tags': category, 'language': language
                          }

    # Keep only the strip's title header and the comic image itself.
    keep_only_tags = [
        dict(name='header', attrs={'class': 'fn-content-header bluelabel'}),
        dict(name='figure', attrs={'class': ['comic']}),
    ]

    def parse_index(self):
        '''Build the feed list: one feed per enabled strip, one article
        per day of that strip (newest last, reversed by make_links).'''
        feeds = []
        # Saved strip pages must outlive this method so calibre can fetch
        # them later via the file:// URLs returned in the articles.
        self.panel_tdir = PersistentTemporaryDirectory('arcamax')
        self.panel_counter = 0
        for title, url in [
            # ####### COMICS - GENERAL ########
            # (u"9 Chickweed Lane", u"https://www.arcamax.com/thefunnies/ninechickweedlane"),
            # (u"Agnes", u"https://www.arcamax.com/thefunnies/agnes"),
            # (u"Andy Capp", u"https://www.arcamax.com/thefunnies/andycapp"),
            (u"BC", u"https://www.arcamax.com/thefunnies/bc"),
            # (u"Baby Blues", u"https://www.arcamax.com/thefunnies/babyblues"),
            # (u"Beetle Bailey", u"https://www.arcamax.com/thefunnies/beetlebailey"),
            (u"Blondie", u"https://www.arcamax.com/thefunnies/blondie"),
            # (u"Boondocks", u"https://www.arcamax.com/thefunnies/boondocks"),
            # (u"Cathy", u"https://www.arcamax.com/thefunnies/cathy"),
            # (u"Daddys Home", u"https://www.arcamax.com/thefunnies/daddyshome"),
            (u"Dilbert", u"https://www.arcamax.com/thefunnies/dilbert"),
            # (u"Dinette Set", u"https://www.arcamax.com/thefunnies/thedinetteset"),
            (u"Dog Eat Doug", u"https://www.arcamax.com/thefunnies/dogeatdoug"),
            # (u"Doonesbury", u"https://www.arcamax.com/thefunnies/doonesbury"),
            # (u"Dustin", u"https://www.arcamax.com/thefunnies/dustin"),
            (u"Family Circus", u"https://www.arcamax.com/thefunnies/familycircus"),
            (u"Garfield", u"https://www.arcamax.com/thefunnies/garfield"),
            # (u"Get Fuzzy", u"https://www.arcamax.com/thefunnies/getfuzzy"),
            # (u"Girls and Sports", u"https://www.arcamax.com/thefunnies/girlsandsports"),
            # (u"Hagar the Horrible", u"https://www.arcamax.com/thefunnies/hagarthehorrible"),
            # (u"Heathcliff", u"https://www.arcamax.com/thefunnies/heathcliff"),
            # (u"Jerry King Cartoons", u"https://www.arcamax.com/thefunnies/humorcartoon"),
            # (u"Luann", u"https://www.arcamax.com/thefunnies/luann"),
            # (u"Momma", u"https://www.arcamax.com/thefunnies/momma"),
            # (u"Mother Goose and Grimm", u"https://www.arcamax.com/thefunnies/mothergooseandgrimm"),
            (u"Mutts", u"https://www.arcamax.com/thefunnies/mutts"),
            # (u"Non Sequitur", u"https://www.arcamax.com/thefunnies/nonsequitur"),
            # (u"Pearls Before Swine", u"https://www.arcamax.com/thefunnies/pearlsbeforeswine"),
            # (u"Pickles", u"https://www.arcamax.com/thefunnies/pickles"),
            # (u"Red and Rover", u"https://www.arcamax.com/thefunnies/redandrover"),
            # (u"Rubes", u"https://www.arcamax.com/thefunnies/rubes"),
            # (u"Rugrats", u"https://www.arcamax.com/thefunnies/rugrats"),
            (u"Speed Bump", u"https://www.arcamax.com/thefunnies/speedbump"),
            (u"Wizard of Id", u"https://www.arcamax.com/thefunnies/wizardofid"),
            (u"Zits", u"https://www.arcamax.com/thefunnies/zits"),
        ]:
            self.log('Finding strips for:', title)
            articles = self.make_links(url, title)
            if articles:
                feeds.append((title, articles))
            if self.test and len(feeds) >= self.test[0]:
                break
        return feeds

    def make_links(self, url, title):
        '''Walk backwards from *url* through up to num_comics_to_get days
        of a strip, saving each page locally and returning the article
        dicts (oldest first) for calibre's feed list.'''
        current_articles = []
        for _ in range(self.num_comics_to_get):
            raw = self.index_to_soup(url, raw=True)
            self.panel_counter += 1
            path = os.path.join(self.panel_tdir, '%d.html' %
                                self.panel_counter)
            # Save the raw page so calibre downloads it from disk later.
            with open(path, 'wb') as f:
                f.write(raw)
            soup = self.index_to_soup(raw)
            # Page titles look like "Strip for <date> | ArcaMax"; keep the
            # part before the pipe as the article title.
            title = self.tag_to_string(
                soup.find('title')).partition('|')[0].strip()
            if 'for' not in title.split():
                title = title + ' for today'
            date = self.tag_to_string(
                soup.find(name='span', attrs={'class': ['cur']}))
            self.log('\tFound:', title, 'at:', url)
            current_articles.append(
                {'title': title, 'url': 'file://' + path, 'description': '', 'date': date})
            if self.test and len(current_articles) >= self.test[1]:
                break
            # Follow the "prev" link to the previous day's strip. If it is
            # missing (start of the archive), stop instead of crashing with
            # a TypeError on the None lookup.
            a = soup.find(name='a', attrs={'class': ['prev']})
            if a is None:
                break
            url = 'https://www.arcamax.com' + a['href']
        current_articles.reverse()
        return current_articles

    def preprocess_html(self, soup):
        '''Absolutize relative image URLs against the site host (using the
        same www.arcamax.com host as every other URL in this recipe).'''
        for img in soup.findAll('img', src=True):
            if img['src'].startswith('/'):
                img['src'] = 'https://www.arcamax.com' + img['src']
        return soup

    extra_css = '''
        img {max-width:100%; min-width:100%;}
    '''