1"""Helper APIs for interaction with Google Fonts.
2
3Provides APIs to interact with font subsets, codepoints for font or subset.
4
5To run the tests:
6$ cd fonts/tools
7fonts/tools$ python util/google_fonts.py
8# or do:
$ python path/to/fonts/tools/util/google_fonts.py --nam_dir path/to/fonts/tools/encodings/
10
11"""
12from __future__ import print_function, unicode_literals
13
14import collections
15import contextlib
16import errno
17import os
18import re
19import sys
20import codecs
21from warnings import warn
22import unittest
23
24if __name__ == '__main__':
25  # some of the imports here wouldn't work otherwise
26  sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
27
28from fontTools import ttLib
29
30import fonts_public_pb2 as fonts_pb2
31from google.protobuf import text_format
32import gflags as flags
33from util import py_subsets
34
35FLAGS = flags.FLAGS
36flags.DEFINE_string('nam_dir', 'encodings/', 'nam file dir')
37
# See https://www.microsoft.com/typography/otspec/name.htm.
# 'name' table nameID values used when reading font metadata.
NAME_COPYRIGHT = 0
NAME_FAMILY = 1
NAME_UNIQUEID = 3
NAME_FULLNAME = 4
NAME_PSNAME = 6


# cmap platform / encoding IDs that identify Unicode character maps
# (used by UnicodeCmapTables to select tables).
_PLATFORM_ID_MICROSOFT = 3
_PLATFORM_ENC_UNICODE_BMP = 1
_PLATFORM_ENC_UNICODE_UCS4 = 10
_PLATFORM_ENCS_UNICODE = (_PLATFORM_ENC_UNICODE_BMP, _PLATFORM_ENC_UNICODE_UCS4)

# Captures (family, styleweight) from names like Lobster-Regular.ttf.
_FAMILY_WEIGHT_REGEX = r'([^/-]+)-(\w+)\.ttf$'

# Matches 4 or 5 hexadecimal digits that are uppercase at the beginning of the
# test string. The match is stored in group 0, e.g:
# >>> _NAMELIST_CODEPOINT_REGEX.match('1234X').groups()[0]
# '1234'
# >>> _NAMELIST_CODEPOINT_REGEX.match('1234A').groups()[0]
# '1234A'
_NAMELIST_CODEPOINT_REGEX = re.compile('^([A-F0-9]{4,5})')

# The canonical [to Google Fonts] name comes before any aliases
_KNOWN_WEIGHTS = collections.OrderedDict([
    ('Thin', 100),
    ('Hairline', 100),
    ('ExtraLight', 200),
    ('Light', 300),
    ('Regular', 400),
    ('', 400),  # Family-Italic resolves to this
    ('Medium', 500),
    ('SemiBold', 600),
    ('Bold', 700),
    ('ExtraBold', 800),
    ('Black', 900)
])


# The style names accepted by NamePartsForStyleWeight (CSS-style).
_VALID_STYLES = {'normal', 'italic'}


# (Mask, Name) pairs.
# See https://www.microsoft.com/typography/otspec/os2.htm#fss.
_FS_SELECTION_BITS = [
    (1 << 0, 'ITALIC'),
    (1 << 1, 'UNDERSCORE'),
    (1 << 2, 'NEGATIVE'),
    (1 << 3, 'OUTLINED'),
    (1 << 4, 'STRIKEOUT'),
    (1 << 5, 'BOLD'),
    (1 << 6, 'REGULAR'),
    (1 << 7, 'USE_TYPO_METRICS'),
    (1 << 8, 'WWS'),
    (1 << 9, 'OBLIQUE')
]


# license_dir => license name mappings
_KNOWN_LICENSE_DIRS = {
    'apache': 'APACHE2',
    'ofl': 'OFL',
    'ufl': 'UFL',
}


# (file path, family name, style, weight) parsed from a font filename.
FileFamilyStyleWeightTuple = collections.namedtuple(
    'FileFamilyStyleWeightTuple', ['file', 'family', 'style', 'weight'])
106
107
class Error(Exception):
  """Base for Google Fonts errors; all module exceptions derive from this."""
110
111
class ParseError(Error):
  """Exception used when parse failed (e.g. an unrecognized font filename)."""
114
115
116
117
def UnicodeCmapTables(font):
  """Yield the Unicode cmap tables of a font.

  Args:
    font: A TTFont.
  Yields:
    cmap tables that contain unicode mappings
  """
  for cmap_table in font['cmap'].tables:
    is_microsoft = cmap_table.platformID == _PLATFORM_ID_MICROSOFT
    is_unicode_enc = cmap_table.platEncID in _PLATFORM_ENCS_UNICODE
    if is_microsoft and is_unicode_enc:
      yield cmap_table
130
131
132
_displayed_errors = set()


def ShowOnce(msg):
  """Print msg to stderr, suppressing duplicate messages across calls."""
  global _displayed_errors
  if msg not in _displayed_errors:
    _displayed_errors.add(msg)
    print(msg, file=sys.stderr)
140
141
def UniqueSort(*args):
  """Merge the provided iterables, de-duplicate, and sort.

  Args:
    *args: Iterables whose items will be merged, sorted and de-duplicated.
  Returns:
    A sorted list of the unique items.
  """
  # set().union accepts any number of iterables and handles the merge in one
  # call; sorted() then gives a deterministic order.
  return sorted(set().union(*args))
154
155
def RegularWeight(metadata):
  """Finds the filename of the regular (normal/400) font file.

  Args:
    metadata: The metadata to search for the regular file data.
  Returns:
    The name of the regular file, usually Family-Regular.ttf.
  Raises:
    OSError: If regular file could not be found. errno.ENOENT.
  """
  for font in metadata.fonts:
    if font.style == 'normal' and font.weight == 400:
      # Normalize the extension to .ttf regardless of what metadata lists.
      return os.path.splitext(font.filename)[0] + '.ttf'

  name = metadata.name if metadata.HasField('name') else '??'
  raise OSError(errno.ENOENT, 'unable to find regular weight in %s' % name)
174
175
def ListSubsets():
  """Returns a list of all subset names, in lowercase."""
  # The names come straight from the generated py_subsets module.
  return py_subsets.SUBSETS
179
180
def Metadata(file_or_dir):
  """Loads and returns the METADATA.pb proto for a file or directory.

  file_or_dir may be either a file literally named METADATA.pb or a
  directory that directly contains one.

  Args:
    file_or_dir: A file or directory.
  Returns:
    Python object loaded from METADATA.pb content.
  Raises:
    ValueError: if file_or_dir isn't a METADATA.pb file or dir containing one.
  """
  if os.path.isdir(file_or_dir):
    metadata_file = os.path.join(file_or_dir, 'METADATA.pb')
  elif (os.path.isfile(file_or_dir)
        and os.path.basename(file_or_dir) == 'METADATA.pb'):
    metadata_file = file_or_dir
  else:
    raise ValueError('%s is neither METADATA.pb file or a directory' %
                     file_or_dir)

  msg = fonts_pb2.FamilyProto()
  with codecs.open(metadata_file, encoding='utf-8') as metadata:
    text_format.Merge(metadata.read(), msg)
  return msg
208
209
def CodepointsInSubset(subset, unique_glyphs=False):
  """Returns the set of codepoints contained in a given subset.

  Args:
    subset: The lowercase name of a subset, e.g. latin.
    unique_glyphs: Optional, whether to only include glyphs unique to subset.
  Returns:
    A set of int codepoints, or None if no codepoint files were found.
  """
  if unique_glyphs:
    nam_files = [CodepointFileForSubset(subset)]
  else:
    nam_files = CodepointFiles(subset)

  nam_files = [path for path in nam_files if path is not None]
  if not nam_files:
    return None

  codepoints = set()
  for path in nam_files:
    with codecs.open(path, encoding='utf-8') as nam_file:
      for line in nam_file:
        if line.startswith('#'):
          continue  # comment line
        # Lines look like '0xABCD <description>'; slice past the '0x'.
        match = _NAMELIST_CODEPOINT_REGEX.match(line[2:7])
        if match:
          codepoints.add(int(match.group(1), 16))
  return codepoints
239
def CodepointsInFont(font_filename):
  """Returns the set of codepoints present in the font file specified.

  Args:
    font_filename: The name of a font file.
  Returns:
    A set of integers, each representing a codepoint present in font.
  """
  codepoints = set()
  # closing() guarantees the font's file handle is released.
  with contextlib.closing(ttLib.TTFont(font_filename)) as ttf:
    for table in UnicodeCmapTables(ttf):
      codepoints.update(table.cmap.keys())
  return codepoints
255
256
def CodepointFileForSubset(subset):
  """Returns the full path to the file of codepoints unique to subset.

  This API does NOT return additional codepoint files that are normally merged
  into the subset. For that, use CodepointFiles.

  Args:
    subset: The subset we want the codepoint file for.
  Returns:
    Full path to the file containing the codepoint file for subset or None if it
    could not be located.
  Raises:
    OSError: If the --nam_dir doesn't exist. errno.ENOTDIR.
  """
  # expanduser so we can do things like --nam_dir=~/oss/googlefontdirectory/
  enc_path = os.path.expanduser(FLAGS.nam_dir)
  if not os.path.exists(enc_path):
    raise OSError(errno.ENOTDIR, 'No such directory', enc_path)

  filename = os.path.join(enc_path, '%s_unique-glyphs.nam' % subset)
  if os.path.isfile(filename):
    return filename

  ShowOnce('no cp file for %s found at %s'
           % (subset, filename[len(enc_path):]))
  return None
283
284
def CodepointFiles(subset):
  """Returns the codepoint files that contain the codepoints in a merged subset.

  If a subset X includes codepoints from multiple files, this function
  returns all those files while CodepointFileForSubset returns the single
  file that lists the codepoints unique to the subset. For example, greek-ext
  contains greek-ext, greek, and latin codepoints. This function would return
  all three files whereas CodepointFileForSubset would return just greek-ext.

  Args:
    subset: The subset we want the codepoint files for.
  Returns:
    A list of 1 or more codepoint files that make up this subset; entries may
    be None if a file could not be located.
  """
  files = [subset]

  # y-ext includes y
  # Except latin-ext which already has latin.
  if subset != 'latin-ext' and subset.endswith('-ext'):
    files.append(subset[:-4])

  # almost all subsets include latin.
  if subset not in ('khmer', 'latin'):
    files.append('latin')

  # Return a concrete list rather than map(): under Python 3 map() is a lazy,
  # single-use iterator, which contradicts the documented list return type and
  # would silently break callers that iterate twice.
  return [CodepointFileForSubset(f) for f in files]
311
312
def SubsetsInFont(file_path, min_pct, ext_min_pct=None):
  """Finds all subsets for which we support > min_pct of codepoints.

  Args:
    file_path: A file_path to a font file.
    min_pct: Min percent coverage to report a subset. 0 means at least 1 glyph.
      25 means 25%.
    ext_min_pct: The minimum percent coverage to report a -ext subset
      supported. Used to admit extended subsets with a lower percent. Same
      interpretation as min_pct. If None same as min_pct.
  Returns:
    A list of 3-tuples of (subset name, #supported, #in subset).
  """
  all_cps = CodepointsInFont(file_path)

  results = []
  for subset in ListSubsets():
    subset_cps = CodepointsInSubset(subset, unique_glyphs=True)
    if not subset_cps:
      continue

    # Khmer includes latin but we only want to report support for non-Latin.
    if subset == 'khmer':
      subset_cps -= CodepointsInSubset('latin')
      # Re-check: the subtraction may have emptied the set, which would
      # otherwise trigger a ZeroDivisionError in the percentage below.
      if not subset_cps:
        continue

    overlap = all_cps & subset_cps

    target_pct = min_pct
    if ext_min_pct is not None and subset.endswith('-ext'):
      target_pct = ext_min_pct

    if 100.0 * len(overlap) / len(subset_cps) > target_pct:
      results.append((subset, len(overlap), len(subset_cps)))

  return results
348
def FamilyName(fontname):
  """Attempts to build family name from font name.

  For example, HPSimplifiedSans => HP Simplified Sans.

  Args:
    fontname: The name of a font.
  Returns:
    The name of the family that should be in this font.
  """
  # Insert a space before each capitalized word: SomethingUpper => Something Upper
  spaced = re.sub('(.)([A-Z][a-z]+)', r'\1 \2', fontname)
  # Split a trailing number off a word: Font3 => Font 3
  spaced = re.sub('([a-z])([0-9]+)', r'\1 \2', spaced)
  # Split at remaining lower-to-upper transitions: lookHere => look Here
  return re.sub('([a-z0-9])([A-Z])', r'\1 \2', spaced)
365
366
def StyleWeight(styleweight):
  """Breaks apart a style/weight specifier into a 2-tuple of (style, weight).

  Args:
    styleweight: style/weight string, e.g. Bold, Regular, or ExtraLightItalic.
  Returns:
    2-tuple of style (normal or italic) and weight.
  """
  style = 'normal'
  weight_name = styleweight
  if styleweight.endswith('Italic'):
    style = 'italic'
    # Strip the 'Italic' suffix; a bare 'Italic' maps to '' => 400.
    weight_name = styleweight[:-len('Italic')]
  return (style, _KNOWN_WEIGHTS[weight_name])
379
380
def FileFamilyStyleWeight(filename):
  """Extracts family, style, and weight from Google Fonts standard filename.

  Args:
    filename: Font filename, eg Lobster-Regular.ttf.
  Returns:
    FileFamilyStyleWeightTuple for file.
  Raises:
    ParseError: if file can't be parsed.
  """
  match = re.search(_FAMILY_WEIGHT_REGEX, filename)
  if match is None:
    raise ParseError('Could not parse %s' % filename)
  style, weight = StyleWeight(match.group(2))
  return FileFamilyStyleWeightTuple(
      filename, FamilyName(match.group(1)), style, weight)
397
398
def ExtractNames(font, name_id):
  """Return the name-table values for name_id, decoded then ASCII-encoded.

  Non-ASCII characters are dropped ('ignore') in the process.
  """
  values = []
  for record in font['name'].names:
    if record.nameID == name_id:
      text = record.string.decode(record.getEncoding())
      values.append(text.encode('ascii', 'ignore'))
  return values
402
403
def ExtractName(font_or_file, name_id, default):
  """Extracts a name table field (first value if many) from a font.

  Args:
    font_or_file: path to a font file or a TTFont.
    name_id: the ID of the name desired. Use NAME_* constant.
    default: result if no value is present.
  Returns:
    The value of the first entry for name_id or default if there isn't one.
  """
  # isinstance (not `type(...) is`) so TTFont subclasses are handled too.
  if isinstance(font_or_file, ttLib.TTFont):
    names = ExtractNames(font_or_file, name_id)
  else:
    # Treat as a path; closing() releases the font's file handle.
    with contextlib.closing(ttLib.TTFont(font_or_file)) as font:
      names = ExtractNames(font, name_id)

  return names[0] if names else default
426
427
def NamePartsForStyleWeight(astyle, aweight):
  """Gives back the parts that go into the name for this style/weight.

  Args:
    astyle: The style name, eg "normal" or "italic"
    aweight: The font weight
  Returns:
    Tuple of parts that go into the name, typically the name for the weight and
    the name for the style, if any ("normal" typically doesn't factor into
    names).
  Raises:
    ValueError: If the astyle or aweight isn't a supported value.
  """
  astyle = astyle.lower()
  if astyle not in _VALID_STYLES:
    raise ValueError('unsupported style %s' % astyle)

  correct_style = 'Italic' if astyle == 'italic' else None

  correct_name = None
  # items() (not the Python-2-only iteritems()) preserves the OrderedDict
  # insertion order, so the canonical name for a weight wins over aliases.
  for name, weight in _KNOWN_WEIGHTS.items():
    if weight == aweight:
      correct_name = name
      break

  if not correct_name:
    raise ValueError('unsupported weight: %d' % aweight)

  return tuple(n for n in (correct_name, correct_style) if n)
459
460
def _RemoveAll(alist, value):
  """Remove every occurrence of value from alist, in place."""
  # Slice-assignment keeps the caller's list object identity.
  alist[:] = [item for item in alist if item != value]
464
465
def FilenameFor(family, style, weight, ext=''):
  """Build the canonical font filename stem, e.g. Lobster-Regular(.ttf)."""
  family = family.replace(' ', '')
  parts = list(NamePartsForStyleWeight(style, weight))
  # 'Regular Italic' is written as just 'Italic'.
  if 'Italic' in parts:
    _RemoveAll(parts, 'Regular')
  return '%s-%s%s' % (family, ''.join(parts), ext)
474
475
def FullnameFor(family, style, weight):
  """Build the human-readable full name, e.g. 'Lobster Bold Italic'."""
  parts = [family] + list(NamePartsForStyleWeight(style, weight))
  # 'Regular' never appears in full names.
  _RemoveAll(parts, 'Regular')
  return ' '.join(parts)
481
482
def FontDirs(path):
  """Finds all the font directories (based on METADATA.pb) under path.

  Args:
    path: A path to search under.
  Yields:
    Directories under path that have a METADATA.pb.
  """
  for dir_name, _, _ in os.walk(path):
    metadata_path = os.path.join(dir_name, 'METADATA.pb')
    if os.path.isfile(metadata_path):
      yield dir_name
494
495
496
497
def FsSelectionMask(flag):
  """Get the mask for a given named bit in fsSelection.

  Args:
    flag: Name of the flag per otspec, eg ITALIC, BOLD, etc.
  Returns:
    Bitmask for that flag.
  Raises:
    ValueError: if flag isn't the name of any fsSelection bit.
  """
  matches = [mask for (mask, name) in _FS_SELECTION_BITS if name == flag]
  if matches:
    return matches[0]
  raise ValueError('No mask for %s' % flag)
512
513
def FsSelectionFlags(fs_selection):
  """Get the named flags enabled in a given fsSelection.

  Args:
    fs_selection: An fsSelection value.
  Returns:
    List of names of flags enabled in fs_selection.
  """
  # _FS_SELECTION_BITS is ordered by bit position, so output order is stable.
  return [name for (mask, name) in _FS_SELECTION_BITS if fs_selection & mask]
527
528
def _EntryForEndOfPath(path, answer_map):
  """Return the unique answer_map value keyed by a (lowercased) path segment.

  Raises:
    ValueError: if zero or multiple segments match keys in answer_map.
  """
  answers = []
  for segment in path.split(os.sep):
    key = segment.lower()
    if key in answer_map:
      answers.append(answer_map[key])
  if len(answers) != 1:
    raise ValueError('Found %d possible matches: %s' % (
        len(answers), answers))
  return answers[0]
537
538
def LicenseFromPath(path):
  """Try to figure out the license for a given path.

  Splits path and looks for known license dirs (apache/ofl/ufl) in segments.

  Args:
    path: A filesystem path, hopefully including a license dir.
  Returns:
    The name of the license, eg OFL, UFL, etc.
  Raises:
    ValueError: if 0 or >1 licenses match path.
  """
  return _EntryForEndOfPath(path, _KNOWN_LICENSE_DIRS)
552
def _parseNamelistHeader(lines):
  """Parse the '#$' directive lines of a Namelist header.

  Args:
    lines: List of header lines (comment lines starting with '#').
  Returns:
    Dict with keys 'lines' (copy of all header lines) and 'includes'
    (set of arguments of '#$ include <name>' directives).
  """
  includes = set()
  for line in lines:
    if not line.startswith('#$'):
      # not functional line, regular comment
      continue
    # partition (unlike split(' ', 1)) tolerates a directive that has no
    # argument instead of raising ValueError on unpacking.
    keyword, _, args = line.rstrip()[2:].lstrip().partition(' ')
    if keyword == 'include' and args:
      includes.add(args)
  return {
      'lines': list(lines)
    , 'includes': includes
  }
566
567
def get_codepoint_from_line(line):
  """Parse a '0xABCD ...' Namelist line into an int codepoint.

  Returns None (and warns for lowercase hex, which the format forbids)
  when the hex field doesn't match the expected uppercase format.
  """
  assert line.startswith('0x')
  hex_field = line[2:7]
  match = _NAMELIST_CODEPOINT_REGEX.match(hex_field)
  if match is not None:
    return int(match.groups()[0], 16)
  # See whether uppercasing would have matched, to warn about it.
  match = _NAMELIST_CODEPOINT_REGEX.match(hex_field.upper())
  if match is not None:
    # Codepoints must be uppercase, it's documented
    warn('Found a codepoint with lowercase unicode hex value: 0x{0}'.format(match.groups()[0]))
  return None
578
def _parseNamelist(lines):
  """Parse Namelist lines into codepoints, header data, and noncodes.

  Args:
    lines: Iterable of lines of a Namelist file.
  Returns:
    Tuple of (set of int codepoints, dict from _parseNamelistHeader,
    set of non-codepoint glyph names).
  """
  cps = set()
  noncodes = set()
  headerLines = []
  readingHeader = True
  for line in lines:
    if readingHeader:
      if not line.startswith('#'):
        # first none comment line ends the header
        readingHeader = False
      else:
        headerLines.append(line)
        continue
    # reading the body, i.e. codepoints

    if line.startswith('0x'):
      codepoint = get_codepoint_from_line(line)
      if codepoint is None:
        # ignore all lines that we don't understand
        continue
      cps.add(codepoint)
      # description
      # line[(2+len(codepoint)),]
    elif line.startswith('      '):
      # A 6-space indent marks a glyph without a codepoint ("noncode");
      # its name is the last space-separated token on the line.
      noncode = line.strip().rsplit(' ')[-1]
      if len(noncode):
        noncodes.add(noncode)

  header = _parseNamelistHeader(headerLines)
  return cps, header, noncodes
609
def parseNamelist(filename):
  """Parse filename as Namelist and return a tuple of
    (codepoints set, header data dict, noncodes set)
  """
  with codecs.open(filename, encoding='utf-8') as namFile:
    return _parseNamelist(namFile)
616
def _loadNamelistIncludes(item, unique_glyphs, cache):
  """Load the includes of an encoding Namelist files.

  Fills in item's "includes", "charset" and "noCharcode" entries by reading
  every file listed in the header's include set.

  This is an implementation detail of readNamelist.
  """
  includes = item["includes"] = []
  # Start from copies so the "own*" sets are never mutated.
  charset = item["charset"] = set() | item["ownCharset"]

  noCharcode = item["noCharcode"] = set() | item["ownNoCharcode"]

  # Includes are resolved relative to the including file's directory.
  dirname =  os.path.dirname(item["fileName"])
  for include in item["header"]["includes"]:
    includeFile = os.path.join(dirname, include)
    try:
      includedItem = readNamelist(includeFile, unique_glyphs, cache)
    except NamelistRecursionError:
      # Cyclic include: skip it rather than recurse forever.
      continue
    if includedItem in includes:
      continue
    includes.append(includedItem)
    charset |= includedItem["charset"]
    # NOTE(review): charset merges the transitive "charset", but this merges
    # only the direct "ownNoCharcode" — confirm the asymmetry is intended.
    noCharcode |= includedItem["ownNoCharcode"]
  return item
640
def __readNamelist(cache, filename, unique_glyphs):
  """Return a dict with the data of an encoding Namelist file.

  Parses the file at most once per cache; include resolution is deferred
  unless the full charset is requested and not yet computed.

  This is an implementation detail of readNamelist.
  """
  item = cache.get(filename)
  if item is None:
    cps, header, noncodes = parseNamelist(filename)
    item = {
        'fileName': filename,
        'ownCharset': cps,
        'header': header,
        'ownNoCharcode': noncodes,
        'includes': None,  # placeholder
        'charset': None,  # placeholder
        'noCharcode': None,
    }
    cache[filename] = item

  if unique_glyphs or item['charset'] is not None:
    return item

  # full-charset/includes are requested and not cached yet
  _loadNamelistIncludes(item, unique_glyphs, cache)
  return item
667
class NamelistRecursionError(Error):
  """Exception to control infinite recursion in Namelist includes."""
  pass
671
672
def _readNamelist(currentlyIncluding, cache, namFilename, unique_glyphs):
  """Read namFilename while guarding against include cycles.

  This is an implementation detail of readNamelist.

  Raises:
    NamelistRecursionError: if namFilename is already in the process of
      being included.
  """
  # Normalize so the same file is recognized regardless of how it's spelled.
  filename = os.path.abspath(os.path.normcase(namFilename))
  if filename in currentlyIncluding:
    raise NamelistRecursionError(filename)
  currentlyIncluding.add(filename)
  try:
    return __readNamelist(cache, filename, unique_glyphs)
  finally:
    currentlyIncluding.remove(filename)
690
def readNamelist(namFilename, unique_glyphs=False, cache=None):
  """Load a Namelist file, optionally resolving its includes.

  Args:
    namFilename: The path to the Namelist file.
    unique_glyphs: Optional, whether to only include glyphs unique to subset.
    cache: Optional, a dict used to cache loaded Namelist files.

  Returns:
  A dict with following keys:
  "fileName": (string) absolute path to namFilename
  "ownCharset": (set) the set of codepoints defined by the file itself
  "header": (dict) the result of _parseNamelistHeader
  "includes":
      (list) if unique_glyphs=False, the resulting dicts of readNamelist
            for each of the include files
      (None) if unique_glyphs=True
  "charset":
      (set) if unique_glyphs=False, the union of "ownCharset" and all
            "charset" items of each included file
      (None) if unique_glyphs=True

  If you are using  unique_glyphs=True and an external cache, don't expect
  the keys "includes" and "charset" to have a specific value.
  Depending on the state of cache, if unique_glyphs=True the returned
  dict may have None values for its "includes" and "charset" keys.
  """
  currentlyIncluding = set()
  # `is None` (not truthiness) so a caller-supplied empty dict is actually
  # used and ends up populated for reuse across calls.
  if cache is None:
    cache = {}
  return _readNamelist(currentlyIncluding, cache, namFilename, unique_glyphs)
721
def codepointsInNamelist(namFilename, unique_glyphs=False, cache=None):
  """Returns the set of codepoints contained in a given Namelist file.

  This is a replacement CodepointsInSubset and implements the "#$ include"
  header format.

  Args:
    namFilename: The path to the  Namelist file.
    unique_glyphs: Optional, whether to only include glyphs unique to subset.
  Returns:
    A set containing the glyphs in the subset.
  """
  nam_data = readNamelist(namFilename, unique_glyphs, cache)
  return nam_data['ownCharset' if unique_glyphs else 'charset']
737
738### unit tests ###
739
def makeTestMethod(subset, namelistFilename):
  """Build a (name, function) pair comparing old and new codepoint readers."""
  testName = 'test_legacy_subsets_{0}'.format(subset.replace('-', '_'))

  def test(self):
    """Check that the legacy CodepointsInSubset and the new
    codepointsInNamelist produce identical sets.

    This only holds while the '#$ include' statements in the Namelist files
    reproduce the old dependency logic implemented in CodepointFiles.
    """
    old_charset = {hex(c) for c in CodepointsInSubset(
        subset, unique_glyphs=self.unique_glyphs)}
    new_charset = {hex(c) for c in codepointsInNamelist(
        namelistFilename, unique_glyphs=self.unique_glyphs,
        cache=self._cache)}
    self.assertTrue(len(old_charset) > 0)
    self.assertEqual(old_charset, new_charset)

  return testName, test
756
def initTestProperties(cls):
  """Attach one legacy-vs-Namelist comparison test per subset to cls.

  Returns the list of test method names that were added.
  """
  initialized = []
  for subset in ListSubsets():
    nam_file = CodepointFileForSubset(subset)
    if nam_file is not None:
      name, method = makeTestMethod(subset, nam_file)
      setattr(cls, name, method)
      initialized.append(name)
  return initialized
767
768
class TestCodepointReading(unittest.TestCase):
  """Container for the comparison tests added by initTestProperties."""

  # Compare only each subset's unique glyphs (ownCharset).
  unique_glyphs = True
  # Namelist cache shared by all test methods; created in setUpClass.
  _cache = None

  @classmethod
  def setUpClass(cls):
    cls._cache = {}

  @classmethod
  def tearDownClass(cls):
    cls._cache = None
780
781
def main(argv):
  """Entry point: build the dynamic test cases, then run unittest."""
  # CodepointFileForSubset needs gflags to be parsed and that happens in
  # app.run(). Thus, we can't dynamically build our test cases before.
  initTestProperties(TestCodepointReading)
  unittest.main(argv=argv, verbosity=2)
if __name__ == '__main__':
  # Imported lazily so importing this module doesn't require apputils.
  from google.apputils import app
  app.run()
790