1"""Script to generate reports on translator classes from Doxygen sources.
2
  The main purpose of the script is to extract, from the sources related to
  internationalization (the translator classes), the information needed to
  generate the documentation (language.doc) and the translator report
  (translator_report.txt) from the template and data files (language.tpl,
  maintainers.txt).

  Simply run the script without parameters to get the reports and
  documentation for all supported languages. If you want to generate the
  translator report only for some languages, pass their codes as arguments
  to the script. In that case, the language.doc will not be generated.
  Example:

    python translator.py en nl cz

  Originally, the script was written in Perl and was known as translator.pl.
  The last Perl version was dated 2002/05/21 (plus some later corrections)

                                         Petr Prikryl (prikryl at atlas dot cz)

  History:
  --------
  2002/05/21 - This was the last Perl version.
  2003/05/16 - List of language marks can be passed as arguments.
  2004/01/24 - Total reimplementation started: classes TrManager, and Transl.
  2004/02/05 - First version that produces translator report. No language.doc yet.
  2004/02/10 - First fully functional version that generates both the translator
               report and the documentation. It is a bit slower than the
               Perl version, but is much less tricky and much more flexible.
               It also solves some problems that were not solved by the Perl
               version. The translator report content should be more useful
               for developers.
  2004/02/11 - Some tuning-up to provide more useful information.
  2004/04/16 - Added new tokens to the tokenizer (to remove some warnings).
  2004/05/25 - Added from __future__ import generators not to force Python 2.3.
  2004/06/03 - Removed dependency on textwrap module.
  2004/07/07 - Fixed the bug in the fill() function.
  2004/07/21 - Better e-mail mangling for HTML part of language.doc.
             - Plural not used for reporting a single missing method.
             - Removal of not used translator adapters is suggested only
               when the report is not restricted to selected languages
               explicitly via script arguments.
  2004/07/26 - Better reporting of not-needed adapters.
  2004/10/04 - Reporting of not called translator methods added.
  2004/10/05 - Modified to check only doxygen/src sources for the previous report.
  2005/02/28 - Slight modification to generate "mailto.txt" auxiliary file.
  2005/08/15 - Doxygen's root directory determined primarily from DOXYGEN
               environment variable. When not found, then relatively to the script.
  2007/03/20 - The "translate me!" searched in comments and reported if found.
  2008/06/09 - Warning when the MAX_DOT_GRAPH_HEIGHT is still part of trLegendDocs().
  2009/05/09 - Changed HTML output to fit the XHTML DTD.
  2009/09/02 - Added percentage info to the report (implemented / to be implemented).
  2010/02/09 - Added checking/suggestion 'Reimplementation using UTF-8 suggested'.
  2010/03/03 - Added [unreachable] prefix used in maintainers.txt.
  2010/05/28 - BOM skipped; minor code cleaning.
  2010/05/31 - e-mail mangled already in maintainers.txt
  2010/08/20 - maintainers.txt to UTF-8, related processing of unicode strings
             - [any mark] introduced instead of [unreachable] only
             - marks highlighted in HTML
  2010/08/30 - Highlighting in what will be the table in langhowto.html modified.
  2010/09/27 - The underscore in \latexonly part of the generated language.doc
               was prefixed by backslash (was LaTeX related error).
  2013/02/19 - Better diagnostics when translator_xx.h is too crippled.
  2013/06/25 - TranslatorDecoder checks removed after removing the class.
  2013/09/04 - Coloured status in langhowto. *ALMOST up-to-date* category
               of translators introduced.
  2014/06/16 - unified for Python 2.6+ and 3.0+
  """

from __future__ import print_function

import os
import platform
import re
import sys
import textwrap


def xopen(fname, mode='r', encoding='utf-8-sig'):
    '''Unified file opening for Python 2 and Python 3.

    Python 2 does not have the encoding argument. Python 3 has one, and
    the default 'utf-8-sig' is used (skips the BOM automatically).
    '''

    if sys.version_info[0] == 2:
        return open(fname, mode=mode) # Python 2 without encoding
    else:
        return open(fname, mode=mode, encoding=encoding) # Python 3 with encoding
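
# A minimal usage sketch for xopen() -- illustrative only, not part of the
# original script; the file name below is hypothetical:
#
#   f = xopen('translator_nl.h')   # on Python 3 opened as utf-8-sig (BOM skipped)
#   first_line = f.readline()      # text string on Python 3, byte str on Python 2
#   f.close()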


def fill(s):
    """Returns the string reformatted as a wrapped, multiline paragraph.

    Replaces whitespace runs by single spaces and then wraps the text to
    lines of about 70 characters."""

    # Replace all whitespace by spaces, remove whitespaces that are not
    # necessary, strip the left and right whitespaces, and break the string
    # to list of words.
    rexWS = re.compile(r'\s+')
    lst = rexWS.sub(' ', s).strip().split()

    # If the list is not empty, put the words together and form the lines
    # of maximum 70 characters. Build the list of lines.
    lines = []
    if lst:
        line = lst.pop(0)   # no separation space in front of the first word
        for word in lst:
            if len(line) + len(word) < 70:
                line += ' ' + word
            else:
                lines.append(line)  # another full line formed
                line = word         # next line started
        lines.append(line)          # the last line
    return '\n'.join(lines)
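
# Illustrative behaviour of fill() -- an assumed example, not output captured
# from running the script:
#
#   fill('  one\ttwo\n\n  three   ')
#
# first normalizes the whitespace to 'one two three' and then joins the words
# into lines shorter than about 70 characters, separated by newlines.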


class Transl:
    """One instance is built for each translator.

    The abbreviation of the source file--part after 'translator_'--is used as
    the identification of the object. The empty string is used for the
    abstract Translator class from translator.h. The other information is
    extracted from inside the source file."""
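
    # Illustrative examples of the identification scheme described in the
    # docstring above (the concrete file name is hypothetical):
    #
    #   'translator_nl.h' -> identification 'nl'  (a concrete translator)
    #   'translator.h'    -> identification ''    (the abstract Translator)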

    def __init__(self, fname, manager):
        """Bind to the manager and initialize."""

        # Store the filename and the reference to the manager object.
        self.fname = fname
        self.manager = manager

        # The instance is responsible for loading the source file, so it checks
        # for its existence and quits if something goes wrong.
        if not os.path.isfile(fname):
            sys.stderr.write("\a\nFile '%s' not found!\n" % fname)
            sys.exit(1)

        # Initialize the other collected information.
        self.classId = None
        self.baseClassId = None
        self.readableStatus = None   # 'up-to-date', '1.2.3', '1.3', etc.
        self.status = None           # '', '1.2.03', '1.3.00', etc.
        self.lang = None             # like 'Brazilian'
        self.langReadable = None     # like 'Brazilian Portuguese'
        self.note = None             # like 'should be cleaned up'
        self.prototypeDic = {}       # uniPrototype -> prototype
        self.translateMeText = 'translate me!'
        self.translateMeFlag = False # comments with "translate me!" found
        self.txtMAX_DOT_GRAPH_HEIGHT_flag = False # found in string in trLegendDocs()
        self.obsoleteMethods = None  # list of prototypes to be removed
        self.missingMethods = None   # list of prototypes to be implemented
        self.implementedMethods = None  # list of implemented required methods
        self.adaptMinClass = None    # The newest adapter class that can be used

    def __tokenGenerator(self):
        """Generator that reads the file and yields tokens as 3-tuples.

        The tokens have the form (tokenId, tokenString, lineNo). The
        last returned token has the form ('eof', None, None). Trying to
        access the next token after that raises StopIteration."""
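
        # For illustration (an assumed example, not produced by this code at
        # run time), a source fragment like
        #
        #   class TranslatorDutch : public Translator
        #
        # would yield roughly this token stream:
        #
        #   ('class', 'class', 1), ('id', 'TranslatorDutch', 1),
        #   ('colon', ':', 1), ('public', 'public', 1),
        #   ('id', 'Translator', 1), ..., ('eof', None, None)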

        # Set the dictionary for recognizing tokenId for keywords, separators
        # and the similar categories. The key is the string to be recognized,
        # the value says its token identification.
        tokenDic = { 'class':     'class',
                     'const':     'const',
                     'public':    'public',
                     'protected': 'protected',
                     'private':   'private',
                     'static':    'static',
                     'virtual':   'virtual',
                     ':':         'colon',
                     ';':         'semic',
                     ',':         'comma',
                     '[':         'lsqbra',
                     ']':         'rsqbra',
                     '(':         'lpar',
                     ')':         'rpar',
                     '{':         'lcurly',
                     '}':         'rcurly',
                     '=':         'assign',
                     '*':         'star',
                     '&':         'amp',
                     '+':         'plus',
                     '-':         'minus',
                     '!':         'excl',
                     '?':         'qmark',
                     '<':         'lt',
                     '>':         'gt',
                     "'":         'quot',
                     '"':         'dquot',
                     '.':         'dot',
                     '%':         'perc',
                     '~':         'tilde',
                     '^':         'caret',
                   }

        # Regular expression for recognizing identifiers.
        rexId = re.compile(r'^[a-zA-Z]\w*$')

        # Open the file for reading and extracting tokens until the eof.
        # Initialize the finite automaton.
        f = xopen(self.fname)
        lineNo = 0
        line = ''         # init -- see the pos initialization below
        linelen = 0       # init
        pos = 100         # init -- pos after the end of line
        status = 0

        tokenId = None    # init
        tokenStr = ''     # init -- the characters will be appended.
        tokenLineNo = 0

        while status != 777:

            # Get the next character. Read next line first, if necessary.
            if pos < linelen:
                c = line[pos]
            else:
                lineNo += 1
                line = f.readline()
                linelen = len(line)
                pos = 0
                if line == '':         # eof
                    status = 777
                else:
                    c = line[pos]

            # Consume the character based on the status

            if status == 0:     # basic status

                # This is the initial status. If tokenId is set, yield the
                # token here and only here (except when eof is found).
                # Initialize the token variables after the yield.
                if tokenId:
                    # If it is an unknown item, it can still be recognized
                    # here. Keywords and separators are examples.
                    if tokenId == 'unknown':
                        if tokenStr in tokenDic:
                            tokenId = tokenDic[tokenStr]
                        elif tokenStr.isdigit():
                            tokenId = 'num'
                        elif rexId.match(tokenStr):
                            tokenId = 'id'
                        else:
                            msg = '\aWarning: unknown token "' + tokenStr + '"'
                            msg += '\tfound on line %d' % tokenLineNo
                            msg += ' in "' + self.fname + '".\n'
                            sys.stderr.write(msg)

                    yield (tokenId, tokenStr, tokenLineNo)

                    # If it is a comment that contains the self.translateMeText
                    # string, set the flag -- the situation will be reported.
                    if tokenId == 'comment' and tokenStr.find(self.translateMeText) >= 0:
                        self.translateMeFlag = True

                    tokenId = None
                    tokenStr = ''
                    tokenLineNo = 0

                # Now process the character. When we just skip it (spaces),
                # stay in this status. All characters that will be part of
                # some token cause moving to the specific status. And only
                # when moving to the status == 0 (or the final state 777)
                # is the token yielded. In that respect the automaton behaves
                # like a Moore machine (output bound to the status); when
                # collecting the token characters, it behaves like a Mealy
                # machine (actions bound to the transitions).
                if c.isspace():
                    pass                 # just skip whitespace characters
                elif c == '/':           # Possibly comment starts here, but
                    tokenId = 'unknown'  # it could be only a slash in code.
                    tokenStr = c
                    tokenLineNo = lineNo
                    status = 1
                elif c == '#':
                    tokenId = 'preproc'  # preprocessor directive
                    tokenStr = c
                    tokenLineNo = lineNo
                    status = 5
                elif c == '"':           # string starts here
                    tokenId = 'string'
                    tokenStr = c
                    tokenLineNo = lineNo
                    status = 6
                elif c == "'":           # char literal starts here
                    tokenId = 'charlit'
                    tokenStr = c
                    tokenLineNo = lineNo
                    status = 8
                elif c in tokenDic:  # known one-char token
                    tokenId = tokenDic[c]
                    tokenStr = c
                    tokenLineNo = lineNo
                    # stay in this state to yield token immediately
                else:
                    tokenId = 'unknown'  # totally unknown
                    tokenStr = c
                    tokenLineNo = lineNo
                    status = 333

                pos += 1                 # move position in any case

            elif status == 1:            # possibly a comment
                if c == '/':             # ... definitely the C++ comment
                    tokenId = 'comment'
                    tokenStr += c
                    pos += 1
                    status = 2
                elif c == '*':           # ... definitely the C comment
                    tokenId = 'comment'
                    tokenStr += c
                    pos += 1
                    status = 3
                else:
                    status = 0           # unrecognized, don't move pos

            elif status == 2:            # inside the C++ comment
                if c == '\n':            # the end of C++ comment
                    status = 0           # yield the token
                else:
                    tokenStr += c        # collect the C++ comment
                pos += 1

            elif status == 3:            # inside the C comment
                if c == '*':             # possibly the end of the C comment
                    tokenStr += c
                    status = 4
                else:
                    tokenStr += c        # collect the C comment
                pos += 1

            elif status == 4:            # possibly the end of the C comment
                if c == '/':             # definitely the end of the C comment
                    tokenStr += c
                    status = 0           # yield the token
                elif c == '*':           # more stars inside the comment
                    tokenStr += c
                else:
                    tokenStr += c        # this cannot be the end of comment
                    status = 3
                pos += 1

            elif status == 5:            # inside the preprocessor directive
                if c == '\n':            # the end of the preproc. command
                    status = 0           # yield the token
                else:
                    tokenStr += c        # collect the preproc
                pos += 1

            elif status == 6:            # inside the string
                if c == '\\':            # escaped char inside the string
                    tokenStr += c
                    status = 7
                elif c == '"':           # end of the string
                    tokenStr += c
                    status = 0
                else:
                    tokenStr += c        # collect the chars of the string
                pos += 1

            elif status == 7:            # escaped char inside the string
                tokenStr += c            # collect the char of the string
                status = 6
                pos += 1

            elif status == 8:            # inside the char literal
                tokenStr += c            # collect the char of the literal
                status = 9
                pos += 1

            elif status == 9:            # end of char literal expected
                if c == "'":             # ... and found
                    tokenStr += c
                    status = 0
                    pos += 1
                else:
                    tokenId = 'error'    # end of literal was expected
                    tokenStr += c
                    status = 0

            elif status == 333:          # start of the unknown token
                if c.isspace():
                    pos += 1
                    status = 0           # tokenId may be determined later
                elif c in tokenDic:  # separator, don't move pos
                    status = 0
                else:
                    tokenStr += c        # collect
                    pos += 1

        # We should have finished in the final status. If some token
        # has been extracted, yield it first.
        assert(status == 777)
        if tokenId:
            yield (tokenId, tokenStr, tokenLineNo)
            tokenId = None
            tokenStr = ''
            tokenLineNo = 0

        # The file content is processed. Close the file. Then always yield
        # the eof token.
        f.close()
        yield ('eof', None, None)


    def __collectClassInfo(self, tokenIterator):
        """Collect the information about the class and base class.

        The tokens including the opening left curly brace of the class are
        consumed."""

        status = 0  # initial state

        while status != 777:   # final state

            # Always assume that the previous tokens were processed. Get
            # the next one.
            tokenId, tokenStr, tokenLineNo = next(tokenIterator)

            # Process the token and never return back.
            if status == 0:    # waiting for the 'class' keyword.
                if tokenId == 'class':
                    status = 1

            elif status == 1:  # expecting the class identification
                if tokenId == 'id':
                    self.classId = tokenStr
                    status = 2
                else:
                    self.__unexpectedToken(status, tokenId, tokenLineNo)

            elif status == 2:  # expecting the curly brace or base class info
                if tokenId == 'lcurly':
                    status = 777        # correctly finished
                elif tokenId == 'colon':
                    status = 3
                else:
                    self.__unexpectedToken(status, tokenId, tokenLineNo)

            elif status == 3:  # expecting the 'public' in front of base class id
                if tokenId == 'public':
                    status = 4
                else:
                    self.__unexpectedToken(status, tokenId, tokenLineNo)

            elif status == 4:  # expecting the base class id
                if tokenId == 'id':
                    self.baseClassId = tokenStr
                    status = 5
                else:
                    self.__unexpectedToken(status, tokenId, tokenLineNo)

            elif status == 5:  # expecting the curly brace and quitting
                if tokenId == 'lcurly':
                    status = 777        # correctly finished
                elif tokenId == 'comment':
                    pass
                else:
                    self.__unexpectedToken(status, tokenId, tokenLineNo)

        # Extract the status of the TranslatorXxxx class. The readable form
        # will be used in reports; the status form is a string that can be
        # compared lexically (unified length, padding with zeros, etc.).
        if self.baseClassId:
            lst = self.baseClassId.split('_')
            if lst[0] == 'Translator':
                self.readableStatus = 'up-to-date'
                self.status = ''
            elif lst[0] == 'TranslatorAdapter':
                self.status = lst[1] + '.' + lst[2]
                self.readableStatus = self.status
                if len(lst) > 3:        # add the last part of the number
                    self.status += '.' + ('%02d' % int(lst[3]))
                    self.readableStatus += '.' + lst[3]
                else:
                    self.status += '.00'
            elif lst[0] == 'TranslatorEnglish':
                # Obsolete or Based on English.
                if self.classId[-2:] == 'En':
                    self.readableStatus = 'English based'
                    self.status = 'En'
                else:
                    self.readableStatus = 'obsolete'
                    self.status = '0.0.00'

            # Check whether status was set, or set 'strange'.
            if self.status is None:
                self.status = 'strange'
            if not self.readableStatus:
                self.readableStatus = 'strange'

            # Extract the name of the language and the readable form.
            self.lang = self.classId[10:]  # without 'Translator'
            if self.lang == 'Brazilian':
                self.langReadable = 'Brazilian Portuguese'
            elif self.lang == 'Chinesetraditional':
                self.langReadable = 'Chinese Traditional'
            else:
                self.langReadable = self.lang

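    # Illustrative mapping from the base class to the status values computed
    # above (the adapter version shown is only an example):
    #
    #   Translator              -> status '',       readableStatus 'up-to-date'
    #   TranslatorAdapter_1_8_4 -> status '1.8.04', readableStatus '1.8.4'
    #   TranslatorEnglish       -> status 'En' (classId ending with 'En')
    #                              or '0.0.00' (considered obsolete otherwise)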

    def __unexpectedToken(self, status, tokenId, tokenLineNo):
        """Reports unexpected token and quits with exit code 1."""

        import inspect
        calledFrom = inspect.stack()[1][3]
        msg = "\a\nUnexpected token '%s' on the line %d in '%s'.\n"
        msg = msg % (tokenId, tokenLineNo, self.fname)
        msg += 'status = %d in %s()\n' % (status, calledFrom)
        sys.stderr.write(msg)
        sys.exit(1)


    def collectPureVirtualPrototypes(self):
        """Returns the dictionary 'unified prototype' -> 'full prototype'.

        The method is expected to be called only for translator.h. It
        extracts only the pure virtual methods and builds the dictionary
        whose keys are the unified prototypes without argument identifiers."""

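        # The difference between the two collected forms, shown on a
        # hypothetical declaration (the method name is only an example):
        #
        #   virtual QCString trFile(bool first_capital, bool singular) = 0;
        #
        #   prototype:    'virtual QCString trFile(bool first_capital, bool singular)'
        #   uniPrototype: 'QCString trFile(bool, bool)'
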
        # Prepare empty dictionary that will be returned.
        resultDic = {}

        # Start the token generator which parses the class source file.
        tokenIterator = self.__tokenGenerator()

        # Collect the class and the base class identifiers.
        self.__collectClassInfo(tokenIterator)
        assert(self.classId == 'Translator')

        # Collect the public pure virtual method prototypes in the readable
        # form -- as defined in translator.h. Collect also the unified form
        # of the same prototypes that omits everything that can be omitted,
        # namely 'virtual' and the argument identifiers.
        prototype = ''    # readable prototype (with everything)
        uniPrototype = '' # unified prototype (without arg. identifiers)

        # Collect the pure virtual method prototypes. Stop on the closing
        # curly brace followed by the semicolon (end of class).
        status = 0
        curlyCnt = 0      # counter for the level of curly braces

        # Loop until the final state 777 is reached. The errors are processed
        # immediately. In this implementation, it always quits the application.
        while status != 777:

            # Get the next token.
            tokenId, tokenStr, tokenLineNo = next(tokenIterator)

            if status == 0:      # waiting for 'public:'
                if tokenId == 'public':
                    status = 1

            elif status == 1:    # colon after the 'public'
                if tokenId == 'colon':
                    status = 2
                else:
                    self.__unexpectedToken(status, tokenId, tokenLineNo)

            elif status == 2:    # waiting for 'virtual'
                if tokenId == 'virtual':
                    prototype = tokenStr  # but not to unified prototype
                    status = 3
                elif tokenId == 'comment':
                    pass
                elif tokenId == 'rcurly':
                    status = 11         # expected end of class
                else:
                    self.__unexpectedToken(status, tokenId, tokenLineNo)

            elif status == 3:    # return type of the method expected
                if tokenId == 'id':
                    prototype += ' ' + tokenStr
                    uniPrototype = tokenStr  # start collecting the unified prototype
                    status = 4
                elif tokenId == 'tilde':
                    status = 4
                else:
                    self.__unexpectedToken(status, tokenId, tokenLineNo)

            elif status == 4:    # method identifier expected
                if tokenId == 'id':
                    prototype += ' ' + tokenStr
                    uniPrototype += ' ' + tokenStr
                    status = 5
                else:
                    self.__unexpectedToken(status, tokenId, tokenLineNo)

            elif status == 5:    # left bracket of the argument list expected
                if tokenId == 'lpar':
                    prototype += tokenStr
                    uniPrototype += tokenStr
                    status = 6
                else:
                    self.__unexpectedToken(status, tokenId, tokenLineNo)

            elif status == 6:    # collecting arguments of the method
                if tokenId == 'rpar':
                    prototype += tokenStr
                    uniPrototype += tokenStr
                    status = 7
                elif tokenId == 'const':
                    prototype += tokenStr
                    uniPrototype += tokenStr
                    status = 12
                elif tokenId == 'id':           # type identifier
                    prototype += tokenStr
                    uniPrototype += tokenStr
                    status = 13
                else:
                    self.__unexpectedToken(status, tokenId, tokenLineNo)

            elif status == 7:    # assignment expected or left curly brace
                if tokenId == 'assign':
                    status = 8
                elif tokenId == 'lcurly':
                    curlyCnt = 1      # method body entered
                    status = 10
                else:
                    self.__unexpectedToken(status, tokenId, tokenLineNo)

            elif status == 8:    # zero expected
                if tokenId == 'num' and tokenStr == '0':
                    status = 9
                else:
                    self.__unexpectedToken(status, tokenId, tokenLineNo)

            elif status == 9:    # after semicolon, produce the dic item
                if tokenId == 'semic':
                    assert(uniPrototype not in resultDic)
                    resultDic[uniPrototype] = prototype
                    status = 2
                else:
                    self.__unexpectedToken(status, tokenId, tokenLineNo)

            elif status == 10:   # consuming the body of the method
                if tokenId == 'rcurly':
                    curlyCnt -= 1
                    if curlyCnt == 0:
                        status = 2     # body consumed
                elif tokenId == 'lcurly':
                    curlyCnt += 1

            elif status == 11:   # probably the end of class
                if tokenId == 'semic':
                    status = 777
                else:
                    self.__unexpectedToken(status, tokenId, tokenLineNo)

            elif status == 12:   # type id for argument expected
                if tokenId == 'id':
                    prototype += ' ' + tokenStr
                    uniPrototype += ' ' + tokenStr
                    status = 13
                else:
                    self.__unexpectedToken(status, tokenId, tokenLineNo)

            elif status == 13:   # namespace qualification or * or & expected
                if tokenId == 'colon':        # was namespace id
                    prototype += tokenStr
                    uniPrototype += tokenStr
                    status = 14
                elif tokenId == 'star' or tokenId == 'amp':  # pointer or reference
                    prototype += ' ' + tokenStr
                    uniPrototype += ' ' + tokenStr
                    status = 16
                elif tokenId == 'id':         # argument identifier
                    prototype += ' ' + tokenStr
                    # don't put this into unified prototype
                    status = 17
                else:
                    self.__unexpectedToken(status, tokenId, tokenLineNo)

            elif status == 14:   # second colon for namespace:: expected
                if tokenId == 'colon':
                    prototype += tokenStr
                    uniPrototype += tokenStr
                    status = 15
                else:
                    self.__unexpectedToken(status, tokenId, tokenLineNo)

            elif status == 15:   # type after namespace:: expected
                if tokenId == 'id':
                    prototype += tokenStr
                    uniPrototype += tokenStr
                    status = 13
                else:
                    self.__unexpectedToken(status, tokenId, tokenLineNo)

            elif status == 16:   # argument identifier expected
                if tokenId == 'id':
                    prototype += ' ' + tokenStr
                    # don't put this into unified prototype
                    status = 17
                else:
                    self.__unexpectedToken(status, tokenId, tokenLineNo)

            elif status == 17:   # comma or ')' after argument identifier expected
                if tokenId == 'comma':
                    prototype += ', '
                    uniPrototype += ', '
                    status = 6
                elif tokenId == 'rpar':
                    prototype += tokenStr
                    uniPrototype += tokenStr
                    status = 7
                else:
                    self.__unexpectedToken(status, tokenId, tokenLineNo)

        # Eat the rest of the source to cause closing the file.
        while tokenId != 'eof':
            tokenId, tokenStr, tokenLineNo = next(tokenIterator)

        # Return the resulting dictionary with 'uniPrototype -> prototype'.
        return resultDic


    def __collectPublicMethodPrototypes(self, tokenIterator):
        """Collects prototypes of public methods and fills self.prototypeDic.

        The dictionary is filled by items: uniPrototype -> prototype.
        The method is expected to be called only for TranslatorXxxx classes,
        i.e. for the classes that implement translation to some language.
        It assumes that the opening curly brace of the class was already
        consumed. The source is consumed until the end of the class.
        The caller should consume the source until the eof to cause closing
        the source file."""

        assert(self.classId != 'Translator')
        assert(self.baseClassId is not None)

        # The following finite automaton slightly differs from the one
        # inside self.collectPureVirtualPrototypes(). It produces the
        # dictionary item just after consuming the body of the method
        # (transition from state 10 to state 2). It also does not allow
        # definitions of public pure virtual methods, except for
        # TranslatorAdapterBase (states 8 and 9). Argument identifiers inside
        # the method argument lists can be omitted or commented out.
        #
        # Collect the public method prototypes in the readable form -- as
        # defined in the source file. Collect also the unified form of the
        # same prototypes that omits everything that can be omitted, namely
        # 'virtual' and the argument identifiers.
        prototype = ''    # readable prototype (with everything)
        uniPrototype = '' # unified prototype (without arg. identifiers)
        warning = ''      # warning message -- if something special detected
        methodId = None   # processed method id

        # Collect the method prototypes. Stop on the closing
        # curly brace followed by the semicolon (end of class).
        status = 0
        curlyCnt = 0      # counter for the level of curly braces

        # Loop until the final state 777 is reached. The errors are processed
        # immediately. In this implementation, it always quits the application.
        while status != 777:

            # Get the next token.
            tokenId, tokenStr, tokenLineNo = next(tokenIterator)

            if status == 0:      # waiting for 'public:'
                if tokenId == 'public':
                    status = 1
                elif tokenId == 'eof':  # non-public things until the eof
                    status = 777

            elif status == 1:    # colon after the 'public'
                if tokenId == 'colon':
                    status = 2
                else:
                    self.__unexpectedToken(status, tokenId, tokenLineNo)

            elif status == 2:    # waiting for 'virtual' (can be omitted)
                if tokenId == 'virtual':
                    prototype = tokenStr  # but not to unified prototype
                    status = 3
                elif tokenId == 'id':     # 'virtual' was omitted
                    prototype = tokenStr
                    uniPrototype = tokenStr  # start collecting the unified prototype
                    status = 4
                elif tokenId == 'comment':
                    pass
                elif tokenId == 'protected' or tokenId == 'private':
                    status = 0
                elif tokenId == 'rcurly':
                    status = 11         # expected end of class
                else:
                    self.__unexpectedToken(status, tokenId, tokenLineNo)

            elif status == 3:    # return type of the method expected
                if tokenId == 'id':
                    prototype += ' ' + tokenStr
                    uniPrototype = tokenStr  # start collecting the unified prototype
                    status = 4
                else:
                    self.__unexpectedToken(status, tokenId, tokenLineNo)

            elif status == 4:    # method identifier expected
                if tokenId == 'id':
                    prototype += ' ' + tokenStr
                    uniPrototype += ' ' + tokenStr
                    methodId = tokenStr    # for reporting
                    status = 5
                else:
                    self.__unexpectedToken(status, tokenId, tokenLineNo)

            elif status == 5:    # left bracket of the argument list expected
                if tokenId == 'lpar':
                    prototype += tokenStr
                    uniPrototype += tokenStr
                    status = 6
                else:
                    self.__unexpectedToken(status, tokenId, tokenLineNo)

            elif status == 6:    # collecting arguments of the method
                if tokenId == 'rpar':
                    prototype += tokenStr
                    uniPrototype += tokenStr
                    status = 7
                elif tokenId == 'const':
                    prototype += tokenStr
                    uniPrototype += tokenStr
                    status = 12
                elif tokenId == 'id':           # type identifier
                    prototype += tokenStr
                    uniPrototype += tokenStr
                    status = 13
                else:
                    self.__unexpectedToken(status, tokenId, tokenLineNo)

            elif status == 7:    # left curly brace expected
                if tokenId == 'lcurly':
                    curlyCnt = 1      # method body entered
                    status = 10
                elif tokenId == 'comment':
                    pass
                elif tokenId == 'assign': # allowed only for TranslatorAdapterBase
                    assert(self.classId == 'TranslatorAdapterBase')
                    status = 8
                else:
                    self.__unexpectedToken(status, tokenId, tokenLineNo)

            elif status == 8:    # zero expected (TranslatorAdapterBase)
                assert(self.classId == 'TranslatorAdapterBase')
                if tokenId == 'num' and tokenStr == '0':
                    status = 9
                else:
                    self.__unexpectedToken(status, tokenId, tokenLineNo)

            elif status == 9:    # after semicolon (TranslatorAdapterBase)
                assert(self.classId == 'TranslatorAdapterBase')
                if tokenId == 'semic':
                    status = 2
                else:
                    self.__unexpectedToken(status, tokenId, tokenLineNo)

            elif status == 10:   # consuming the body of the method, then dic item
                if tokenId == 'rcurly':
                    curlyCnt -= 1
                    if curlyCnt == 0:
                        # Check for possible copy/paste error when name
                        # of the method was not corrected (i.e. the same
                        # name already exists).
                        if uniPrototype in self.prototypeDic:
                            msg = "'%s' prototype found again (duplicate)\n" % uniPrototype
                            msg += "in '%s'.\n" % self.fname
                            sys.stderr.write(msg)
                            assert False

                        assert(uniPrototype not in self.prototypeDic)
                        # Insert new dictionary item.
                        self.prototypeDic[uniPrototype] = prototype
                        status = 2      # body consumed
                        methodId = None # outside of any method
                elif tokenId == 'lcurly':
                    curlyCnt += 1

                # Warn in special case.
                elif methodId == 'trLegendDocs' and tokenId == 'string' \
                    and tokenStr.find('MAX_DOT_GRAPH_HEIGHT') >= 0:
                        self.txtMAX_DOT_GRAPH_HEIGHT_flag = True


            elif status == 11:   # probably the end of class
                if tokenId == 'semic':
                    status = 777
                else:
                    self.__unexpectedToken(status, tokenId, tokenLineNo)

            elif status == 12:   # type id for argument expected
                if tokenId == 'id':
                    prototype += ' ' + tokenStr
                    uniPrototype += ' ' + tokenStr
                    status = 13
                else:
                    self.__unexpectedToken(status, tokenId, tokenLineNo)

            elif status == 13:   # :: or * or & or id or ) expected
                if tokenId == 'colon':        # was namespace id
                    prototype += tokenStr
                    uniPrototype += tokenStr
                    status = 14
                elif tokenId == 'star' or tokenId == 'amp':  # pointer or reference
                    prototype += ' ' + tokenStr
                    uniPrototype += ' ' + tokenStr
                    status = 16
                elif tokenId == 'id':         # argument identifier
                    prototype += ' ' + tokenStr
                    # don't put this into unified prototype
                    status = 17
                elif tokenId == 'comment':    # probably commented-out identifier
                    prototype += tokenStr
                elif tokenId == 'rpar':
                    prototype += tokenStr
                    uniPrototype += tokenStr
                    status = 7
                elif tokenId == 'comma':
                    prototype += ', '
                    uniPrototype += ', '
                    status = 6
                else:
                    self.__unexpectedToken(status, tokenId, tokenLineNo)

            elif status == 14:   # second colon for namespace:: expected
                if tokenId == 'colon':
                    prototype += tokenStr
                    uniPrototype += tokenStr
                    status = 15
                else:
                    self.__unexpectedToken(status, tokenId, tokenLineNo)

            elif status == 15:   # type after namespace:: expected
                if tokenId == 'id':
                    prototype += tokenStr
                    uniPrototype += tokenStr
                    status = 13
                else:
                    self.__unexpectedToken(status, tokenId, tokenLineNo)

            elif status == 16:   # argument identifier or ) expected
                if tokenId == 'id':
                    prototype += ' ' + tokenStr
                    # don't put this into unified prototype
                    status = 17
                elif tokenId == 'rpar':
                    prototype += tokenStr
                    uniPrototype += tokenStr
                    status = 7
                elif tokenId == 'comment':
                    prototype += tokenStr
                else:
                    self.__unexpectedToken(status, tokenId, tokenLineNo)

            elif status == 17:   # comma or ')' after argument identifier expected
                if tokenId == 'comma':
                    prototype += ', '
                    uniPrototype += ', '
                    status = 6
                elif tokenId == 'rpar':
                    prototype += tokenStr
                    uniPrototype += tokenStr
                    status = 7
                else:
                    self.__unexpectedToken(status, tokenId, tokenLineNo)



    def collectAdapterPrototypes(self):
        """Returns the dictionary of prototypes implemented by adapters.

        It is created to process the translator_adapter.h. The returned
        dictionary has the form: unifiedPrototype -> (version, classId).
        Thus, by looking up a prototype, we learn which is the newest
        (least adapting) adapter that is sufficient for implementing
        the method."""

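        # The returned dictionary might look like this (the version number
        # and the method shown are purely illustrative):
        #
        #   { 'QCString trFile(bool, bool)':
        #         ('1.2.04', 'TranslatorAdapter_1_2_4'), ... }
        #
        # i.e. TranslatorAdapter_1_2_4 is the newest adapter that still
        # implements that method.
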
        # Start the token generator which parses the class source file.
        assert(os.path.split(self.fname)[1] == 'translator_adapter.h')
        tokenIterator = self.__tokenGenerator()

        # Get the references to the involved dictionaries.
        reqDic = self.manager.requiredMethodsDic

        # Create the empty dictionary that will be returned.
        adaptDic = {}


        # Loop through the source of the adapter file until no other adapter
        # class is found.
        while True:
            try:
                # Collect the class and the base class identifiers.
                self.__collectClassInfo(tokenIterator)

                # Extract the comparable version of the adapter class.
                # Note: The self.status as set by self.__collectClassInfo()
                # contains similar version, but is related to the base class,
                # not to the class itself.
                lst = self.classId.split('_')
                version = ''
                if lst[0] == 'TranslatorAdapter': # TranslatorAdapterBase otherwise
                    version = lst[1] + '.' + lst[2]
                    if len(lst) > 3:        # add the last part of the number
                        version += '.' + ('%02d' % int(lst[3]))
                    else:
                        version += '.00'

                # Collect the prototypes of implemented public methods.
                self.__collectPublicMethodPrototypes(tokenIterator)

                # For the required methods, update the dictionary of methods
                # implemented by the adapter.
                for protoUni in self.prototypeDic:
                    if protoUni in reqDic:
                        # This required method will be marked as implemented
                        # by this adapter class. This implementation assumes
                        # that newer adapters do not reimplement any required
                        # methods already implemented by older adapters.
                        assert(protoUni not in adaptDic)
                        adaptDic[protoUni] = (version, self.classId)

                # Clear the dictionary object and the information related
                # to the class as the next adapter class is to be processed.
                self.prototypeDic.clear()
                self.classId = None
                self.baseClassId = None

            except StopIteration:
                break

        # Return the result dictionary.
        return adaptDic


    def processing(self):
        """Processing of the source file -- only for TranslatorXxxx classes."""

        # Start the token generator which parses the class source file.
        tokenIterator = self.__tokenGenerator()

        # Collect the class and the base class identifiers.
        self.__collectClassInfo(tokenIterator)
        assert(self.classId != 'Translator')
        assert(self.classId[:17] != 'TranslatorAdapter')

        # Collect the prototypes of implemented public methods.
        self.__collectPublicMethodPrototypes(tokenIterator)

        # Eat the rest of the source to cause closing the file.
        while True:
            try:
                t = next(tokenIterator)
            except StopIteration:
                break

        # Shorthands for the used dictionaries.
        reqDic = self.manager.requiredMethodsDic
        adaptDic = self.manager.adaptMethodsDic
        myDic = self.prototypeDic

        # Build the list of obsolete methods.
        self.obsoleteMethods = []
        for p in myDic:
            if p not in reqDic:
                self.obsoleteMethods.append(p)
        self.obsoleteMethods.sort()

        # Build the list of missing methods and the list of implemented
        # required methods.
        self.missingMethods = []
        self.implementedMethods = []
        for p in reqDic:
            if p in myDic:
                self.implementedMethods.append(p)
            else:
                self.missingMethods.append(p)
        self.missingMethods.sort()
        self.implementedMethods.sort()

        # Check whether adapter must be used or suggest the newest one.
        # Change the status and set the note accordingly.
        if self.baseClassId != 'Translator':
            if not self.missingMethods:
                self.note = 'Change the base class to Translator.'
                self.status = ''
                self.readableStatus = 'almost up-to-date'
            elif self.baseClassId != 'TranslatorEnglish':
                # The translator uses some of the adapters.
                # Look at the missing methods and check what adapter
                # implements them. Remember the one with the lowest version.
                adaptMinVersion = '9.9.99'
                adaptMinClass = 'TranslatorAdapter_9_9_99'
                for uniProto in self.missingMethods:
                    if uniProto in adaptDic:
                        version, cls = adaptDic[uniProto]
                        if version < adaptMinVersion:
                            adaptMinVersion = version
                            adaptMinClass = cls

                # Test against the current status -- preserve the self.status.
                # Possibly, the translator implements enough methods to
                # use some newer adapter.
                status = self.status

                # If the version of the used adapter is smaller than
                # the required, set the note and update the status as if
                # the newer adapter was used.
                if adaptMinVersion > status:
                    self.note = 'Change the base class to %s.' % adaptMinClass
                    self.status = adaptMinVersion
                    self.adaptMinClass = adaptMinClass
                    self.readableStatus = adaptMinVersion # simplified

        # If everything seems OK, some explicit warning flags still could
        # be set.
        if not self.note and self.status == '' and \
           (self.translateMeFlag or self.txtMAX_DOT_GRAPH_HEIGHT_flag):
            self.note = ''
            if self.translateMeFlag:
                self.note += 'The "%s" found in a comment.' % self.translateMeText
            if self.note != '':
                self.note += '\n\t\t'
            if self.txtMAX_DOT_GRAPH_HEIGHT_flag:
                self.note += 'The MAX_DOT_GRAPH_HEIGHT found in trLegendDocs()'

        # If everything seems OK, but there are obsolete methods, set
        # the note to clean-up source. This note will be used only when
        # the previous code did not set another note (priority).
        if not self.note and self.status == '' and self.obsoleteMethods:
            self.note = 'Remove the obsolete methods (never used).'

        # If there is at least some note but the status suggests it is
        # otherwise up-to-date, mark it as ALMOST up-to-date.
        if self.note and self.status == '':
            self.readableStatus = 'almost up-to-date'

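    # Note on the version comparison above: the status strings are padded
    # with zeros so that plain string comparison orders them like version
    # numbers (the values below are illustrative):
    #
    #   '1.2.04' < '1.2.16' < '1.3.00'
    #
    # which would not hold for the unpadded readable forms.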

    def report(self, fout):
        """Writes the report part for the source into the output file.

        Nothing is written for up-to-date translators without problems."""

        # If there is nothing to report, return immediately.
        if self.status == '' and not self.note:
            return

        # Report the number of not implemented methods.
        fout.write('\n\n\n')
        fout.write(self.classId + '   (' + self.baseClassId + ')')
        percentImplemented = 100    # init
        allNum = len(self.manager.requiredMethodsDic)
        if self.missingMethods:
            num = len(self.missingMethods)
            percentImplemented = 100 * (allNum - num) / allNum
            fout.write('  %d' % num)
            fout.write(' method')
            if num > 1:
                fout.write('s')
            fout.write(' to implement (%d %%)' % (100 * num / allNum))
        fout.write('\n' + '-' * len(self.classId))

        # Write the info about the implemented required methods.
        fout.write('\n\n  Implements %d' % len(self.implementedMethods))
        fout.write(' of the required methods (%d %%).' % percentImplemented)

        # Report the missing methods, but only when it is not an
        # English-based translator.
        if self.missingMethods and self.status != 'En':
            fout.write('\n\n  Missing methods (should be implemented):\n')
            reqDic = self.manager.requiredMethodsDic
            for p in self.missingMethods:
                fout.write('\n    ' + reqDic[p])

        # Always report obsolete methods.
        if self.obsoleteMethods:
            fout.write('\n\n  Obsolete methods (should be removed, never used):\n')
            myDic = self.prototypeDic
            for p in self.obsoleteMethods:
                fout.write('\n    ' + myDic[p])

        # For English-based translator, report the implemented methods.
        if self.status == 'En' and self.implementedMethods:
            fout.write('\n\n  This English-based translator implements ')
            fout.write('the following methods:\n')
            reqDic = self.manager.requiredMethodsDic
            for p in self.implementedMethods:
                fout.write('\n    ' + reqDic[p])


    def getmtime(self):
        """Returns the last modification time of the source file."""
        assert(os.path.isfile(self.fname))
        return os.path.getmtime(self.fname)


class TrManager:
    """Collects basic info and builds subordinate Transl objects."""

    def __init__(self):
        """Determines paths, creates and initializes structures.

        The arguments of the script may explicitly say what languages should
        be processed. Pass the two-letter identifications that are used
        for composing the source filenames; for example,

            python translator.py cz

        processes only the translator_cz.h source.
        """

        # Determine the path to the script and its name.
        self.script = os.path.abspath(sys.argv[0])
        self.script_path, self.script_name = os.path.split(self.script)
        self.script_path = os.path.abspath(self.script_path)

        # Determine the absolute path to Doxygen's root directory.
        # If the DOXYGEN environment variable is not set, the directory is
        # determined from the path of the script.
        doxy_default = os.path.join(self.script_path, '..')
        self.doxy_path = os.path.abspath(os.getenv('DOXYGEN', doxy_default))
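        # Illustrative example (the path is made up): running the script as
        #   DOXYGEN=/path/to/doxygen python translator.py
        # overrides the default root that is otherwise derived from the
        # location of this script.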

        # Build the path names based on the knowledge of Doxygen's root.
        self.doc_path = os.path.join(self.doxy_path, 'doc')
        self.src_path = os.path.join(self.doxy_path, 'src')
        # Normally the original sources aren't in the current directory
        # (as we are in the build directory), so we have to specify the
        # original source/documentation/... directories.
        self.org_src_path = self.src_path
        self.org_doc_path = self.doc_path
        self.org_doxy_path = self.doxy_path
        if (len(sys.argv) > 1 and os.path.isdir(os.path.join(sys.argv[1], 'src'))):
            self.org_src_path = os.path.join(sys.argv[1], 'src')
            self.org_doc_path = os.path.join(sys.argv[1], 'doc')
            self.org_doxy_path = sys.argv[1]
            # Get the explicit arguments of the script.
            self.script_argLst = sys.argv[2:]
        else:
            # Get the explicit arguments of the script.
            self.script_argLst = sys.argv[1:]
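
        # Illustrative invocations (paths and language codes are made up):
        #   python translator.py                       -- all translator sources
        #   python translator.py cz nl                 -- only the listed languages
        #   python translator.py /path/to/doxygen cz   -- explicit tree with the
        #                                                 original sources first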

        # Create the empty dictionary for Transl objects identified by the
        # class identifier of the translator.
        self.__translDic = {}

        # Initialize the dictionary of required methods to None. The key is
        # the unified prototype, the value is the full prototype. It is set
        # inside self.__build().
        self.requiredMethodsDic = None

        # Create the empty dictionary that says what method is implemented
        # by what adapter.
        self.adaptMethodsDic = {}

        # The last modification time will capture the modification of this
        # script, of the translator.h, of the translator_adapter.h (see
        # self.__build() for the last two), of all the translator_xx.h files,
        # and of the template for generating the documentation. This time can
        # then be compared with the modification time of the generated
        # documentation to decide whether the doc should be regenerated.
        self.lastModificationTime = os.path.getmtime(self.script)

        # Set the names of the translator report text file, of the template
        # for generating the "Internationalization" document, of the generated
        # file itself, and of the maintainers list.
        self.translatorReportFileName = 'translator_report.txt'
        self.maintainersFileName = 'maintainers.txt'
        self.languageTplFileName = 'language.tpl'
        self.languageDocFileName = 'language.doc'

        # The information about the maintainers will be stored
        # in the dictionary with the following name.
        self.__maintainersDic = None

        # Define the other used structures and variables for information.
        self.langLst = None                   # including English based
        self.supportedLangReadableStr = None  # coupled En-based as a note
        self.numLang = None                   # excluding coupled En-based
        self.doxVersion = None                # Doxygen version

        # Build objects where each one is responsible for one translator.
        self.__build()


    def __build(self):
        """Find the translator files and build the objects for translators."""

        # The translator.h file must exist (the Transl object will check it).
        # Create the object for it and let it build the dictionary of
        # required methods.
        tr = Transl(os.path.join(self.org_src_path, 'translator.h'), self)
        self.requiredMethodsDic = tr.collectPureVirtualPrototypes()
        tim = tr.getmtime()
        if tim > self.lastModificationTime:
            self.lastModificationTime = tim

        # The translator_adapter.h file must exist (the Transl object will
        # check it). Create the object for it and store the collected adapter
        # prototypes in the dictionary.
        tr = Transl(os.path.join(self.org_src_path, 'translator_adapter.h'), self)
        self.adaptMethodsDic = tr.collectAdapterPrototypes()
        tim = tr.getmtime()
        if tim > self.lastModificationTime:
            self.lastModificationTime = tim

        # Create the list of the filenames with language translator sources.
        # If explicit arguments were given to the script, process only
        # those files.
        if self.script_argLst:
            lst = ['translator_' + x + '.h' for x in self.script_argLst]
            for fname in lst:
                if not os.path.isfile(os.path.join(self.org_src_path, fname)):
                    sys.stderr.write("\a\nFile '%s' not found!\n" % fname)
                    sys.exit(1)
        else:
            lst = os.listdir(self.org_src_path)
            lst = [x for x in lst if x.startswith('translator_')
                                   and x.endswith('.h')
                                   and x != 'translator_adapter.h']

        # Build an object for each translator_xx.h file and process the
        # content of the file. Then insert the object into the dictionary
        # accessed via classId.
        for fname in lst:
            fullname = os.path.join(self.org_src_path, fname)
            tr = Transl(fullname, self)
            tr.processing()
            assert(tr.classId != 'Translator')
            self.__translDic[tr.classId] = tr

        # Extract the global information from the processed objects.
        self.__extractProcessedInfo()


    def __extractProcessedInfo(self):
        """Build lists and strings of the processed info."""

        # Build the auxiliary list with strings composed of the status,
        # the readable form of the language, and the classId.
        statLst = []
        for obj in list(self.__translDic.values()):
            assert(obj.classId != 'Translator')
            s = obj.status + '|' + obj.langReadable + '|' + obj.classId
            statLst.append(s)
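
        # Illustrative examples of such items (languages/versions made up):
        #   '|Dutch|TranslatorDutch'              -- up-to-date (empty status)
        #   '1.8.4|Czech|TranslatorCzech'         -- based on an adapter class
        #   'En|Esperanto|TranslatorEsperantoEn'  -- English based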

        # Sort the list and extract the object identifiers (classId's) for
        # the up-to-date translators and English-based translators.
        statLst.sort()
        self.upToDateIdLst = [x.split('|')[2] for x in statLst if x[0] == '|']
        self.EnBasedIdLst = [x.split('|')[2] for x in statLst if x[:2] == 'En']

        # Reverse the list and extract the TranslatorAdapter based translators.
        statLst.reverse()
        self.adaptIdLst = [x.split('|')[2] for x in statLst if x[0].isdigit()]

        # Build the list of tuples that contain (langReadable, obj).
        # Sort it by readable name.
        self.langLst = []
        for obj in list(self.__translDic.values()):
            self.langLst.append((obj.langReadable, obj))

        self.langLst.sort(key=lambda x: x[0])

        # Create the list of readable language names. If the language also
        # has an English-based version, modify the item by appending
        # a note. The number of supported languages is equal to the length
        # of the list.
        langReadableLst = []
        for name, obj in self.langLst:
            if obj.status == 'En': continue

            # Append the 'En' to the classId to possibly obtain the classId
            # of the English-based object. If the object exists, modify the
            # name for the readable list of supported languages.
            classIdEn = obj.classId + 'En'
            if classIdEn in self.__translDic:
                name += ' (+En)'

            # Append the resulting name of the language, possibly with the note.
            langReadableLst.append(name)

        # Create the multiline string of readable language names,
        # with punctuation, wrapped as a paragraph.
        if len(langReadableLst) == 1:
            s = langReadableLst[0]
        elif len(langReadableLst) == 2:
            s = ' and '.join(langReadableLst)
        else:
            s = ', '.join(langReadableLst[:-1]) + ', and '
            s += langReadableLst[-1]

        self.supportedLangReadableStr = fill(s + '.')
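
        # Illustrative result (language names made up):
        #   'Czech, Dutch (+En), and German.'
        # The fill() call wraps the string when it is too long for one line.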

        # Find the number of the supported languages. An English-based
        # language is not counted if the non-English-based version also exists.
        self.numLang = len(self.langLst)
        for name, obj in self.langLst:
            if obj.status == 'En':
                classId = obj.classId[:-2]
                if classId in self.__translDic:
                    self.numLang -= 1    # the couple will be counted as one

        # Extract the version of Doxygen.
        f = xopen(os.path.join(self.org_doxy_path, 'VERSION'))
        self.doxVersion = f.readline().strip()
        f.close()

        # Update the last modification time.
        for tr in list(self.__translDic.values()):
            tim = tr.getmtime()
            if tim > self.lastModificationTime:
                self.lastModificationTime = tim


    def __getNoTrSourceFilesLst(self):
        """Returns the list of sources to be checked.

        All .cpp and .l files, plus the .h files that do not declare or
        define the translator methods, are included in the list. The file
        names are searched in the doxygen/src directory.
        """
        files = []
        for item in os.listdir(self.org_src_path):
            # Split the bare name to get the extension.
            name, ext = os.path.splitext(item)
            ext = ext.lower()

            # Include only .cpp, .l, and .h files (case independent) and
            # exclude the files where the checked identifiers are defined
            # (i.e. the translator sources).
            if ext == '.cpp' or ext == '.l' or (ext == '.h' and name.find('translator') == -1):
                fname = os.path.join(self.org_src_path, item)
                assert os.path.isfile(fname) # assumes no directory with the ext
                files.append(fname)          # full name
        return files


    def __removeUsedInFiles(self, fname, dic):
        """Removes items for method identifiers that are found in fname.

        The method reads the content of the file as one string and searches
        for all identifiers from dic. The identifiers that were found in
        the file are removed from the dictionary.

        Note: If more files are to be checked, the files where most of the
        items are probably used should be checked first, and the resulting
        reduced dictionary should be used for checking the next files
        (to speed things up).
        """
        lst_in = list(dic.keys())   # identifiers to be searched for

        # Read the content of the file as one string.
        assert os.path.isfile(fname)
        try:
            with xopen(fname) as f:
                cont = f.read()
                cont = ''.join(cont.split('\n')) # otherwise the 'match' function won't work.
        except UnicodeDecodeError:
            print("Skipping {0} because of decoding errors".format(fname))
            return

        # Remove the items for identifiers that were found in the file.
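        # (Illustrative: for an identifier 'trFiles' the compiled pattern
        # '.*trFiles *\(' matches when 'trFiles(' or 'trFiles (' occurs
        # anywhere in the joined, single-line content.)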
        while lst_in:
            item = lst_in.pop(0)
            rexItem = re.compile('.*' + item + r' *\(')
            if rexItem.match(cont):
                del dic[item]


    def __checkForNotUsedTrMethods(self):
        """Returns the dictionary of not used translator methods.

        The method can be called only after self.requiredMethodsDic has been
        built. The stripped prototypes are the values, the method identifiers
        are the keys.
        """
        # Build the dictionary of the required method prototypes with
        # method identifiers used as keys.
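        # (Illustrative: a unified prototype such as 'QCString trFiles()'
        # yields the identifier 'trFiles'.)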
        trdic = {}
        for prototype in list(self.requiredMethodsDic.keys()):
            ri = prototype.split('(')[0]
            identifier = ri.split()[1].strip()
            trdic[identifier] = prototype

        # Build the list of source files where translator method identifiers
        # can be used.
        files = self.__getNoTrSourceFilesLst()

        # Loop through the files and reduce the dictionary of id -> proto.
        for fname in files:
            self.__removeUsedInFiles(fname, trdic)

        # Return the dictionary of not used translator methods.
        return trdic


    def __emails(self, classId):
        """Returns the list of maintainer emails.

        The method returns the list of e-mail addresses for the translator
        class, but only the addresses that were not marked as [xxx]."""
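        # Illustrative example: the mangled address 'jdoe at example dot org'
        # is unmangled to 'jdoe@example.org'.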
        lst = []
        for m in self.__maintainersDic[classId]:
            if not m[1].startswith('['):
                email = m[1]
                email = email.replace(' at ', '@') # Unmangle the mangled e-mail
                email = email.replace(' dot ', '.')
                lst.append(email)
        return lst


    def getBgcolorByReadableStatus(self, readableStatus):
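        """Returns the HTML background colour that corresponds to the readable status."""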
        if readableStatus == 'up-to-date':
            color = '#ccffcc'    # green
        elif readableStatus.startswith('almost'):
            color = '#ffffff'    # white
        elif readableStatus.startswith('English'):
            color = '#ccffcc'    # green
        elif readableStatus.startswith('1.8'):
            color = '#ffffcc'    # yellow
        elif readableStatus.startswith('1.7'):
            color = '#ffcccc'    # pink
        elif readableStatus.startswith('1.6'):
            color = '#ffcccc'    # pink
        else:
            color = '#ff5555'    # red
        return color


    def generateTranslatorReport(self):
        """Generates the translator report."""

        output = os.path.join(self.doc_path, self.translatorReportFileName)

        # Open the textual report file for the output.
        f = xopen(output, 'w')

        # Output the information about the version.
        f.write('(' + self.doxVersion + ')\n\n')

        # Output the information about the number of the supported languages
        # and the list of the languages, or only the note about the explicitly
        # given languages to process.
        if self.script_argLst:
            f.write('The report was generated for the following, explicitly')
            f.write(' identified languages:\n\n')
            f.write(self.supportedLangReadableStr + '\n\n')
        else:
            f.write('Doxygen supports the following ')
            f.write(str(self.numLang))
            f.write(' languages (sorted alphabetically):\n\n')
            f.write(self.supportedLangReadableStr + '\n\n')

            # Write the summary about the status of the language translators
            # (how many translators are up-to-date, etc.).
            s = 'Of them, %d translators are up-to-date, ' % len(self.upToDateIdLst)
            s += '%d translators are based on some adapter class, ' % len(self.adaptIdLst)
            s += 'and %d are English based.' % len(self.EnBasedIdLst)
            f.write(fill(s) + '\n\n')

        # The e-mail addresses of the maintainers will be collected in
        # the auxiliary file in the order of the translator classes listed
        # in the translator report.
        fmail = xopen(os.path.join(self.doc_path, 'mailto.txt'), 'w')

        # Write the list of "up-to-date" translator classes.
        if self.upToDateIdLst:
            s = '''The following translator classes are up-to-date (sorted
                alphabetically). This means that they derive from the
                Translator class, they implement all %d of the required
                methods, and even minor problems were not spotted by the script:'''
            s = s % len(self.requiredMethodsDic)
            f.write('-' * 70 + '\n')
            f.write(fill(s) + '\n\n')

            mailtoLst = []
            for x in self.upToDateIdLst:
                obj = self.__translDic[x]
                if obj.note is None:
                    f.write('  ' + obj.classId + '\n')
                    mailtoLst.extend(self.__emails(obj.classId))

            fmail.write('up-to-date\n')
            fmail.write('; '.join(mailtoLst))


            # Write separately the list of "ALMOST up-to-date" translator classes.
            s = '''The following translator classes are ALMOST up-to-date (sorted
                alphabetically). This means that they derive from the
                Translator class, but there still may be some minor problems
                listed for them:'''
            f.write('\n' + ('-' * 70) + '\n')
            f.write(fill(s) + '\n\n')
            mailtoLst = []
            for x in self.upToDateIdLst:
                obj = self.__translDic[x]
                if obj.note is not None:
                    f.write('  ' + obj.classId + '\t-- ' + obj.note + '\n')
                    mailtoLst.extend(self.__emails(obj.classId))

            fmail.write('\n\nalmost up-to-date\n')
            fmail.write('; '.join(mailtoLst))

        # Write the list of the adapter based classes. The very obsolete
        # translators that derive from TranslatorEnglish are included.
        if self.adaptIdLst:
            s = '''The following translator classes need maintenance
                (the most obsolete at the end). The other info shows the
                estimated Doxygen version when the class was last updated
                and the number of methods that must be implemented to
                become up-to-date:'''
            f.write('\n' + '-' * 70 + '\n')
            f.write(fill(s) + '\n\n')

            # Also find whether some adapter classes may be removed.
            adaptMinVersion = '9.9.99'

            mailtoLst = []
            numRequired = len(self.requiredMethodsDic)
            for x in self.adaptIdLst:
                obj = self.__translDic[x]
                f.write('  %-30s' % obj.classId)
                f.write('  %-6s' % obj.readableStatus)
                numMissing = len(obj.missingMethods)
                pluralS = ''
                if numMissing > 1: pluralS = 's'
                percent = 100 * numMissing // numRequired
                f.write('\t%2d method%s to implement (%d %%)' % (
                        numMissing, pluralS, percent))
                if obj.note:
                    f.write('\n\tNote: ' + obj.note + '\n')
                f.write('\n')
                mailtoLst.extend(self.__emails(obj.classId)) # to maintainer

                # Check the level of required adapter classes.
                if obj.status != '0.0.00' and obj.status < adaptMinVersion:
                    adaptMinVersion = obj.status

            fmail.write('\n\ntranslator based\n')
            fmail.write('; '.join(mailtoLst))

            # Set the note if some old translator adapters are not needed
            # any more. Do it only when the script is called without arguments,
            # i.e. all languages were checked against the needed translator
            # adapters.
            if not self.script_argLst:
                to_remove = {}
                for version, adaptClassId in list(self.adaptMethodsDic.values()):
                    if version < adaptMinVersion:
                        to_remove[adaptClassId] = True

                if to_remove:
                    lst = list(to_remove.keys())
                    lst.sort()
                    plural = len(lst) > 1
                    note = 'Note: The adapter class'
                    if plural: note += 'es'
                    note += ' ' + ', '.join(lst)
                    if not plural:
                        note += ' is'
                    else:
                        note += ' are'
                    note += ' not used and can be removed.'
                    f.write('\n' + fill(note) + '\n')

        # Write the list of the English-based classes.
        if self.EnBasedIdLst:
            s = '''The following translator classes derive directly from the
                TranslatorEnglish. The class identifier has the suffix 'En'
                that says that this is intentional. Usually, there is also
                a non-English based version of the translator for
                the language:'''
            f.write('\n' + '-' * 70 + '\n')
            f.write(fill(s) + '\n\n')

            for x in self.EnBasedIdLst:
                obj = self.__translDic[x]
                f.write('  ' + obj.classId)
                f.write('\timplements %d methods' % len(obj.implementedMethods))
                if obj.note:
                    f.write(' -- ' + obj.note)
                f.write('\n')

        # Check for not used translator methods and generate a warning if found.
        # The check is rather time consuming, so it is not done when the report
        # is restricted to explicitly given language identifiers.
        if not self.script_argLst:
            dic = self.__checkForNotUsedTrMethods()
            if dic:
                s = '''WARNING: The following translator methods are declared
                    in the Translator class but their identifiers do not appear
                    in source files. The situation should be checked. The .cpp,
                    .l, and .h files (excluding the '*translator*' files) in the
                    doxygen/src directory were simply searched for occurrences
                    of the method identifiers:'''
                f.write('\n' + '=' * 70 + '\n')
                f.write(fill(s) + '\n\n')

                keys = list(dic.keys())
                keys.sort()
                for key in keys:
                    f.write('  ' + dic[key] + '\n')
                f.write('\n')

        # Write the details for the translators.
        f.write('\n' + '=' * 70)
        f.write('\nDetails for translators (classes sorted alphabetically):\n')

        cls = list(self.__translDic.keys())
        cls.sort()

        for c in cls:
            obj = self.__translDic[c]
            assert(obj.classId != 'Translator')
            obj.report(f)

        # Close the report file and the auxiliary file with e-mails.
        f.close()
        fmail.close()


    def __loadMaintainers(self):
        """Load and process the file with the maintainers.

        Fills the dictionary classId -> [(name, e-mail), ...]."""
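
        # A record in maintainers.txt looks roughly like this (illustrative
        # sketch only; the class, name, and address are made up):
        #
        #   TranslatorDutch
        #   Some Maintainer: some dot maintainer at example dot org
        #
        # Records are separated by empty lines, and lines starting with '%'
        # are comments.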

        fname = os.path.join(self.org_doc_path, self.maintainersFileName)

        # Include the maintainers file in the group of files checked with
        # respect to the modification time.
        tim = os.path.getmtime(fname)
        if tim > self.lastModificationTime:
            self.lastModificationTime = tim

        # Process the content of the maintainers file.
        f = xopen(fname)
        inside = False  # inside the record for the language
        lineReady = True
        classId = None
        maintainersLst = None
        self.__maintainersDic = {}
        while lineReady:
            line = f.readline()            # next line
            lineReady = line != ''         # when eof, then line == ''

            line = line.strip()            # eof should also behave as separator
            if line != '' and line[0] == '%':    # skip the comment line
                continue

            if not inside:                 # if outside of the record
                if line != '':            # should be language identifier
                    classId = line
                    maintainersLst = []
                    inside = True
                # Otherwise skip empty lines that do not act as a separator.

            else:                          # if inside the record
                if line == '':            # separator found
                    inside = False
                else:
                    # If it is the first maintainer, create the empty list.
                    if classId not in self.__maintainersDic:
                        self.__maintainersDic[classId] = []

                    # Split the information about the maintainer and append
                    # the tuple. The address may be prefixed with '[unreachable]'
                    # or any other '[xxx]' mark. This will be processed later.
                    lst = line.split(':', 1)
                    assert(len(lst) == 2)
                    t = (lst[0].strip(), lst[1].strip())
                    self.__maintainersDic[classId].append(t)
        f.close()


    def generateLanguageDoc(self):
        """Checks the modtime of files and generates language.doc."""
        self.__loadMaintainers()

        # Check the last modification time of the template file. It is the
        # last file from the group that decides whether the documentation
        # should or should not be generated.
        fTplName = os.path.join(self.org_doc_path, self.languageTplFileName)
        tim = os.path.getmtime(fTplName)
        if tim > self.lastModificationTime:
            self.lastModificationTime = tim

        # If the generated documentation exists and is newer than any of
        # the source files from the group, do not generate it and quit
        # quietly.
        fDocName = os.path.join(self.doc_path, self.languageDocFileName)
        if os.path.isfile(fDocName):
            if os.path.getmtime(fDocName) > self.lastModificationTime:
                return

        # The document either does not exist or is older than some of the
        # sources. It must be generated again.
        #
        # Read the template of the documentation, and remove the first
        # attention lines.
        f = xopen(fTplName)
        doctpl = f.read()
        f.close()

        pos = doctpl.find('/***')
        assert pos != -1
        doctpl = doctpl[pos:]

        # Fill tplDic with the symbols that will be inserted into the
        # document template.
        tplDic = {}
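        # The template is filled with dictionary-based string formatting
        # ('doctpl % tplDic' below), so it can refer to these values with
        # named placeholders such as %(doxVersion)s or %(informationTable)s.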

        s = ('Do not edit this file. It was generated by the %s script.\n' +
             ' * Edit the %s and %s files instead.') % (
             self.script_name, self.languageTplFileName, self.maintainersFileName)
        tplDic['editnote'] = s

        tplDic['doxVersion'] = self.doxVersion
        tplDic['supportedLangReadableStr'] = self.supportedLangReadableStr
        tplDic['translatorReportFileName'] = self.translatorReportFileName

        ahref = '<a href="../doc/' + self.translatorReportFileName
        ahref += '"\n><code>doxygen/doc/' + self.translatorReportFileName
        ahref += '</code></a>'
        tplDic['translatorReportLink'] = ahref
        tplDic['numLangStr'] = str(self.numLang)

        # Define templates for HTML table parts of the documentation.
        htmlTableTpl = '''
            \\htmlonly
            </p>
            <table align="center" cellspacing="0" cellpadding="0" border="0">
            <tr bgcolor="#000000">
            <td>
              <table cellspacing="1" cellpadding="2" border="0">
              <tr bgcolor="#4040c0">
              <td ><b><font size="+1" color="#ffffff"> Language </font></b></td>
              <td ><b><font size="+1" color="#ffffff"> Maintainer </font></b></td>
              <td ><b><font size="+1" color="#ffffff"> Contact address </font>
                      <font size="-2" color="#ffffff">(replace the at and dot)</font></b></td>
              <td ><b><font size="+1" color="#ffffff"> Status </font></b></td>
              </tr>
              <!-- table content begin -->
            %s
              <!-- table content end -->
              </table>
            </td>
            </tr>
            </table>
            <p>
            \\endhtmlonly
            '''
        htmlTableTpl = textwrap.dedent(htmlTableTpl)
        htmlTrTpl = '\n  <tr bgcolor="#ffffff">%s\n  </tr>'
        htmlTdTpl = '\n    <td>%s</td>'
        htmlTdStatusColorTpl = '\n    <td bgcolor="%s">%s</td>'

        # Loop through the Transl objects in the order of sorted readable names
        # and generate the content of the HTML table.
        trlst = []
        for name, obj in self.langLst:
            # Fill the table data elements for one row. The first element
            # contains the readable name of the language. Only the oldest
            # translators are colour-marked in the language column. A less
            # "heavy" colour is used (when compared with the Status column).
            if obj.readableStatus.startswith('1.4'):
                bkcolor = self.getBgcolorByReadableStatus('1.4')
            else:
                bkcolor = '#ffffff'

            lst = [ htmlTdStatusColorTpl % (bkcolor, obj.langReadable) ]

            # The next two elements contain the list of maintainers
            # and the list of their mangled e-mails. For English-based
            # translators that are coupled with the non-English based,
            # insert the 'see' note.
            mm = None  # init -- maintainer
            ee = None  # init -- e-mail address
            if obj.status == 'En':
                # Check whether there is the coupled non-English.
                classId = obj.classId[:-2]
                if classId in self.__translDic:
                    lang = self.__translDic[classId].langReadable
                    mm = 'see the %s language' % lang
                    ee = '&nbsp;'

            if not mm and obj.classId in self.__maintainersDic:
                # Build a string of names separated by the HTML break element.
                # Special notes used instead of names are highlighted.
                lm = []
                for maintainer in self.__maintainersDic[obj.classId]:
                    name = maintainer[0]
                    if name.startswith('--'):
                        name = '<span style="color: red; background-color: yellow">'\
                               + name + '</span>'
                    lm.append(name)
                mm = '<br/>'.join(lm)

                # The marked addresses (they start with a mark like '[unreachable]',
                # '[resigned]', or any other '[xxx]') will not be displayed at all.
                # Only the mark will be used instead.
                rexMark = re.compile('(?P<mark>\\[.*?\\])')
                le = []
                for maintainer in self.__maintainersDic[obj.classId]:
                    address = maintainer[1]
                    m = rexMark.search(address)
                    if m is not None:
                        address = '<span style="color: brown">'\
                                  + m.group('mark') + '</span>'
                    le.append(address)
                ee = '<br/>'.join(le)

            # Append the maintainer and e-mail elements.
            lst.append(htmlTdTpl % mm)
            lst.append(htmlTdTpl % ee)

            # The last element contains the readable form of the status.
            bgcolor = self.getBgcolorByReadableStatus(obj.readableStatus)
            lst.append(htmlTdStatusColorTpl % (bgcolor, obj.readableStatus))

            # Join the table data to one table row.
            trlst.append(htmlTrTpl % (''.join(lst)))

        # Join the table rows and insert into the template.
        htmlTable = htmlTableTpl % (''.join(trlst))

        # Define templates for LaTeX table parts of the documentation.
        latexTableTpl = r'''
            \latexonly
            \footnotesize
            \begin{longtable}{|l|l|l|l|}
              \hline
              {\bf Language} & {\bf Maintainer} & {\bf Contact address} & {\bf Status} \\
              \hline
            %s
              \hline
            \end{longtable}
            \normalsize
            \endlatexonly
            '''
        latexTableTpl = textwrap.dedent(latexTableTpl)
        latexLineTpl = '\n' + r'  %s & %s & {\tt\tiny %s} & %s \\'
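        # Illustrative example of one produced table line (values made up):
        #   '  Czech & Some Maintainer & {\tt\tiny some at example dot org} & 1.8.4 \\'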

        # Loop through the Transl objects in the order of sorted readable names
        # and generate the content of the LaTeX table.
        trlst = []
        for name, obj in self.langLst:
            # For LaTeX, multiple maintainers for the same language are
            # placed on separate rows of the table. The line separator
            # in the table is placed explicitly above the first
            # maintainer. Prepare the arguments for the LaTeX row template.
            maintainers = []
            if obj.classId in self.__maintainersDic:
                maintainers = self.__maintainersDic[obj.classId]

            lang = obj.langReadable
            maintainer = None  # init
            email = None       # init
            if obj.status == 'En':
                # Check whether there is the coupled non-English.
                classId = obj.classId[:-2]
                if classId in self.__translDic:
                    langNE = self.__translDic[classId].langReadable
                    maintainer = 'see the %s language' % langNE
                    email = '~'

            if not maintainer and (obj.classId in self.__maintainersDic):
                maintainer = maintainers[0][0]
                email = maintainers[0][1]

            status = obj.readableStatus

            # Use the template to produce the line of the table and insert
            # the hline plus the constructed line into the table content.
            # The underscore character must be escaped.
            trlst.append('\n  \\hline')
            s = latexLineTpl % (lang, maintainer, email, status)
            s = s.replace('_', '\\_')
            trlst.append(s)

            # List the other maintainers for the language. Do not set
            # lang and status for them.
            lang = '~'
            status = '~'
            for m in maintainers[1:]:
                maintainer = m[0]
                email = m[1]
                s = latexLineTpl % (lang, maintainer, email, status)
                s = s.replace('_', '\\_')
                trlst.append(s)

        # Join the table lines and insert into the template.
        latexTable = latexTableTpl % (''.join(trlst))

        # Put the HTML and LaTeX parts together and define the dic item.
        tplDic['informationTable'] = htmlTable + '\n' + latexTable

        # Insert the symbols into the document template and write it down.
        f = xopen(fDocName, 'w')
        f.write(doctpl % tplDic)
        f.close()

if __name__ == '__main__':

    # Python 2.7+ or Python 3.0+ is required.
    major = sys.version_info[0]
    minor = sys.version_info[1]
    if (major == 2 and minor < 7) or (major == 3 and minor < 0):
        print('Python 2.7+ or Python 3.0+ is required for the script')
        sys.exit(1)

    # The translator manager builds the Transl objects, parses the related
    # sources, and keeps them in memory.
    trMan = TrManager()

    # Process the Transl objects and generate the output files.
    trMan.generateLanguageDoc()
    trMan.generateTranslatorReport()
