#!/usr/bin/env python

"""
TeX

This module contains the facilities for parsing TeX and LaTeX source.
The `TeX' class is an iterator interface to (La)TeX source.  Simply
feed source to a `TeX' instance using the `input' method, then iterate
over the expanded tokens through the standard Python iterator interface.

Example:
    tex = TeX()
    tex.input(open('foo.tex', 'r'))
    for token in tex:
        print(token)

"""
18from io import IOBase
19import string, os, sys, plasTeX, subprocess
20from plasTeX.Tokenizer import Tokenizer, Token, EscapeSequence, Other
21from plasTeX import TeXDocument
22from plasTeX.Base.TeX.Primitives import MathShift
23from plasTeX import ParameterCommand, Macro
24from plasTeX import glue, muglue, mudimen, dimen, number
25from plasTeX.Logging import getLogger, disableLogging, fileLogging
26
27# Only export the TeX class
28__all__ = ['TeX']
29
30log = getLogger()
31status = getLogger('status')
32tokenlog = getLogger('parse.tokens')
33digestlog = getLogger('parse.digest')
34_type = type
35
36class bufferediter(object):
37    """ Buffered iterator """
38    def __init__(self, obj):
39        self._next = iter(obj).__next__
40        self._buffer = []
41    def __iter__(self):
42        return self
43    def __next__(self):
44        if self._buffer:
45            return self._buffer.pop()
46        return self._next()
47    def push(self, value):
48        self._buffer.append(value)
49
50class ArgumentContext(plasTeX.Macro):
51    pass
52
53class TeX(object):
54    """
55    TeX Stream
56
57    This class is the central TeX engine that does all of the
58    parsing, invoking of macros, etc.
59
60    """
61    documentClass = TeXDocument
62
63    def __init__(self, ownerDocument=None, myfile=None):
64        if ownerDocument is None:
65            ownerDocument = self.documentClass()
66            self.toplevel = True
67        elif myfile:
68            self.toplevel = True
69        else:
70            self.toplevel = False
71        self.ownerDocument = ownerDocument
72
73        # Input source stack
74        self.inputs = []
75
76        # Auxiliary files loaded
77        self.auxFiles = []
78
        # TeX argument types and their casting functions
80        self.argtypes = {
81            'url': (self.castNone, {'#':12,'~':12,'%':12,'&':12}),
82            'str': self.castString,
83            str: self.castString,
84            'chr': self.castString,
85            chr: self.castString,
86            'char': self.castString,
87            'cs': self.castControlSequence,
88            'label': self.castLabel,
89            'id': self.castLabel,
90            'idref': self.castRef,
91            'ref': self.castRef,
92            'nox': lambda x,**y: x,
93            'list': self.castList,
94            list: self.castList,
95            'dict': self.castDictionary,
96            dict: self.castDictionary,
97
98            # LaTeX versions of TeX internal parameters
99            'dimen': self.castDimen,
100            'dimension': self.castDimen,
101            'length': self.castDimen,
102#           'mudimen': self.castMuDimen,
103#           'glue':  self.castGlue,
104#           'muglue': self.castMuGlue,
105            'number': self.castNumber,
106            'count': self.castNumber,
107            'int': self.castNumber,
108            int: self.castNumber,
109            'float': self.castDecimal,
110            float: self.castDecimal,
111            'double': self.castDecimal,
112        }
113
        # Start parsing if a source was given
115        self.currentInput = (0,0)
116
117        self.jobname = None
118        if myfile is not None:
119            # Filename
120            if isinstance(myfile, (str, bytes)):
121                myfile = str(myfile)
                # If the config has no files section, or no input encoding
                # is specified, or the encoding is plain utf-8, use
                # utf_8_sig so that a leading BOM is skipped; otherwise use
                # the specified encoding.
128                try:
129                    encoding = self.ownerDocument.config['files'].get('input-encoding', 'utf_8_sig')
130                except (KeyError, AttributeError):
131                    encoding = 'utf_8_sig'
132
133                if encoding in ['utf8', 'utf-8', 'utf_8']:
134                    encoding = 'utf_8_sig'
135
136                fname = self.kpsewhich(myfile)
137                self.input(open(fname, encoding=encoding))
138                self.jobname = os.path.basename(os.path.splitext(myfile)[0])
139
140            # File object
141            else:
142                self.input(myfile)
143                self.jobname = os.path.basename(os.path.splitext(myfile.name)[0])
144
145    def input(self, source):
146        """
147        Add a new input source to the stack
148
149        Required Arguments:
150        source -- can be a string containing TeX source, a file object
151            which contains TeX source, or a list of tokens
152
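        Example (a usage sketch; 'foo.tex' is a placeholder filename):
            tex = TeX()
            tex.input('Hello $x$ world.')     # a string of TeX source
            tex.input(open('foo.tex', 'r'))   # or an open file object
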
153        """
154        if source is None:
155            return
156        if self.jobname is None:
157            if isinstance(source, str):
158                self.jobname = os.path.basename(os.path.splitext(source)[0])
159            elif isinstance(source, IOBase):
160                self.jobname = os.path.basename(os.path.splitext(source.name)[0])
161
162        t = Tokenizer(source, self.ownerDocument.context)
163        self.inputs.append((t, iter(t)))
164        self.currentInput = self.inputs[-1]
165        return self
166
167    def endInput(self):
168        """
169        Pop the most recent input source from the stack
170
171        """
172        if self.inputs:
173            self.inputs.pop()
174        if self.inputs:
175            self.currentInput = self.inputs[-1]
176
177    def loadPackage(self, myfile, options=None):
178        """
179        Load a LaTeX package
180
181        Required Arguments:
182        myfile -- name of the file to load
183
184        Keyword Arguments:
185        options -- options passed to the macro which is loading the package
186
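        Example (a usage sketch; the package name and options are
        illustrative and assume the style file can be found by kpsewhich):
            tex.loadPackage('fancyhdr.sty', options={'draft': True})
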
187        """
188        options = options or {}
189        config = self.ownerDocument.config
190
191        try:
192            path = self.kpsewhich(myfile)
193        except OSError as msg:
194            log.warning(msg)
195            return False
196
197        # Try to load the actual LaTeX style file
198        status.info(' ( %s ' % path)
199
200        try:
201            encoding = config['files']['input-encoding']
202            with open(path, 'r', encoding=encoding) as f:
203                # Put in a flag so that we can parse past our own
204                # package tokens and throw them away, we don't want them in
205                # the output document.
206                flag = plasTeX.Command()
207                self.pushToken(flag)
208                self.input(f)
209                self.ownerDocument.context.packages[myfile] = options or {}
210                for tok in self:
211                    if tok is flag:
212                        break
213
214        except (OSError, IOError, TypeError) as msg:
215            if msg:
216                msg = ' ( %s )' % str(msg)
217            # Failed to load LaTeX style file
218            log.warning('Error opening package "%s"%s', myfile, msg)
219            status.info(' ) ')
220            return False
221
222        status.info(' ) ')
223
224        return True
225
226    @property
227    def filename(self):
228        return self.currentInput[0].filename
229
230    @property
231    def lineNumber(self):
232        return self.currentInput[0].lineNumber
233
234    @property
235    def lineInfo(self):
236        return ' in %s on line %s' % (self.filename, self.lineNumber)
237
238    @staticmethod
239    def disableLogging():
240        """ Turn off logging """
241        disableLogging()
242
243    def fileLogging(self):
244        fname = '%s/%s.log' % (os.path.dirname(self.filename), self.jobname)
245        fileLogging(fname)
246
247    def itertokens(self):
248        """
249        Iterate over unexpanded tokens
250
251        Returns:
252        generator that iterates through the unexpanded tokens
253
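        Example (a usage sketch):
            for tok in tex.itertokens():
                ...     # raw tokens; no macro expansion is performed
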
254        """
255        # Create locals before going into generator loop
256        inputs = self.inputs
257        context = self.ownerDocument.context
258        endInput = self.endInput
259        ownerDocument = self.ownerDocument
260
261        while inputs:
262            # Always get next token from top of input stack
263            try:
264                while 1:
265                    t = next(inputs[-1][-1])
266                    # Save context depth of each token for use in digestion
267                    t.contextDepth = context.depth
268                    t.ownerDocument = ownerDocument
269                    t.parentNode = None
270                    yield t
271
272            except StopIteration:
273                endInput()
274
275            # This really shouldn't happen, but just in case...
276            except IndexError:
277                break
278
279    def iterchars(self):
280        """
281        Iterate over input characters (untokenized)
282
283        Returns:
284        generator that iterates through the untokenized characters
285
286        """
287        # Create locals before going into generator loop
288        inputs = self.inputs
289        context = self.ownerDocument.context
290        endInput = self.endInput
291        ownerDocument = self.ownerDocument
292
293        while inputs:
294            # Walk through characters
295            try:
296                for char in inputs[-1][0].iterchars():
297                    yield char
298                else:
299                    endInput()
300            # This really shouldn't happen, but just in case...
301            except IndexError:
302                break
303
304    def __iter__(self):
305        """
306        Iterate over tokens while expanding them
307
308        Returns:
309        generator that iterates through the expanded tokens
310
311        """
312        # Cache variables before starting the generator
313        next = self.itertokens().__next__
314        pushToken = self.pushToken
315        pushTokens = self.pushTokens
316        createElement = self.ownerDocument.createElement
317        ELEMENT_NODE = Macro.ELEMENT_NODE
318
319        while 1:
320            # Get the next token
321            try:
322                token = next()
323            except StopIteration:
324                return
325
326            # Token is null, ignore it
327            if token is None:
328                continue
329
330            # Macro that has already been expanded
331            elif token.nodeType == ELEMENT_NODE:
332                pass
333
334            # We need to expand this one
335            elif token.macroName is not None:
336                try:
337                    # By default, invoke() should put the macro instance
338                    # itself into the output stream.  We'll handle this
339                    # automatically here if `None' is received.  If you
340                    # really don't want anything in the output stream,
341                    # just return `[ ]'.
342                    obj = createElement(token.macroName)
343                    obj.contextDepth = token.contextDepth
344                    obj.parentNode = token.parentNode
345                    tokens = obj.invoke(self)
346                    if tokens is None:
347#                       log.info('expanding %s %s', token.macroName, obj)
348                        pushToken(obj)
349                    elif tokens:
350#                       log.info('expanding %s %s', token.macroName, ''.join([x.source for x in tokens]))
351                        pushTokens(tokens)
352                    continue
353                except Exception as message:
354                    msg = str(message)
355                    if msg.strip():
356                        msg = ' (%s)' % msg.strip()
357                    log.error('Error while expanding "%s"%s%s',
358                              token.macroName, self.lineInfo, msg)
359                    raise
360
361#           tokenlog.debug('%s: %s', type(token), token.ownerDocument)
362
363            yield token
364
365    def createSubProcess(self):
366        """
367        Create a TeX instance using the same document context
368
369        """
370        # Push a new context for cleanup later
371        tok = ArgumentContext()
372        self.ownerDocument.context.push(tok)
373        tex = type(self)(ownerDocument=self.ownerDocument)
374        tex._endcontext = tok
375        return tex
376
377    def endSubProcess(self):
378        """
379        End the context of a sub-interpreter
380
381        See Also:
382        createSubProcess()
383
384        """
385        if hasattr(self, '_endcontext'):
386            self.ownerDocument.context.pop(self._endcontext)
387
388    def expandTokens(self, tokens, normalize=False, parentNode=None):
389        """
390        Expand a list of unexpanded tokens
391
392        This can be used to expand tokens in a macro argument without
393        having them sent to the output stream.
394
395        Required Arguments:
396        tokens -- list of tokens
397
398        Returns:
399        `TeXFragment' populated with expanded tokens
400
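        Example (a usage sketch):
            toks = tex.textTokens('some text')
            frag = tex.expandTokens(toks)
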
401        """
402        tex = self.createSubProcess()
403
404        # Push the tokens and expand them
405        tex.pushTokens(tokens)
406        frag = tex.ownerDocument.createDocumentFragment()
407        frag.parentNode = parentNode
408        out = tex.parse(frag)
409
410        # Pop all of our nested contexts off
411        tex.endSubProcess()
412
413        if normalize:
414            out.normalize(getattr(tex.ownerDocument, 'charsubs', []))
415
416        return out
417
418
419    def parse(self, output=None):
420        """
421        Parse stream content until it is empty
422
423        Keyword Arguments:
424        output -- object to put the content in.  This should be either
425            a TeXDocument or a TeXFragment
426
427        Returns:
428        `TeXDocument' instance
429
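        Example (a usage sketch; 'foo.tex' is a placeholder filename):
            tex = TeX(myfile='foo.tex')
            doc = tex.parse()
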
430        """
431        tokens = bufferediter(self)
432
433        if output is None:
434            output = self.ownerDocument
435
436        try:
437            for item in tokens:
438                if item.nodeType == Macro.ELEMENT_NODE:
439                    item.parentNode = output
440                    item.digest(tokens)
441                output.append(item)
442        except Exception as message:
443            msg = str(message)
            if msg.strip():
                msg = ' (%s)' % msg.strip()
446            log.error('An error occurred while building the document object%s%s', self.lineInfo, msg)
447            raise
448
449        if self.toplevel:
450            for callback in self.ownerDocument.postParseCallbacks:
451                callback()
452        return output
453
454    def textTokens(self, text):
455        """
456        Return a list of `Other` tokens from a string
457
458        Required Arguments:
459        text -- string containing text to be tokenized
460
461        """
462        return [Other(x) for x in text]
463
464    def pushToken(self, token):
465        """
        Push a token back into the token buffer so that it will be the
        next token read from the input stream
469
470        Required Arguments:
471        token -- token to push back
472
473        """
474        if token is not None:
475            if not self.inputs:
476                self.input([token])
477            else:
478                self.inputs[-1][0].pushToken(token)
479
480    def pushTokens(self, tokens):
481        """
482        Push a list of tokens back into the token buffer to be re-read
483
484        Required Arguments:
485        tokens -- list of tokens
486
487        """
488        if tokens:
489            if not self.inputs:
490                self.input(tokens)
491            else:
492                self.inputs[-1][0].pushTokens(tokens)
493
494    def source(self, tokens):
495        """
496        Return the TeX source representation of the tokens
497
498        Required Arguments:
499        tokens -- list of tokens
500
501        Returns:
502        string containing the TeX source
503
504        """
505        return ''.join([x.source for x in tokens])
506
507    def normalize(self, tokens):
508        """
509        Join consecutive character tokens into a string
510
511        Required Arguments:
512        tokens -- list of tokens
513
514        Returns:
        string, unless the tokens contain values that cannot be cast
        to a string.  In that case, the original tokens are returned
        in a TeXFragment instance
518
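        Example (a usage sketch):
            tex.normalize(tex.textTokens('abc'))   # returns the string 'abc'
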
519        """
520        if tokens is None:
521            return tokens
522
523        grouptokens = [Token.CC_EGROUP, Token.CC_BGROUP]
524        textTokens = [Token.CC_LETTER, Token.CC_OTHER,
525                      Token.CC_EGROUP, Token.CC_BGROUP,
526                      Token.CC_SPACE]
527
528        try: iter(tokens)
529        except TypeError: return tokens
530
531        for t in tokens:
532            if isinstance(t, str):
533                continue
534            # Element nodes can't be part of normalized text
535            if t.nodeType == Macro.ELEMENT_NODE:
536                if len(tokens) == 1:
537                    return tokens.pop()
538                t = self.ownerDocument.createDocumentFragment()
539                t.extend(tokens)
540                t.normalize(getattr(self.ownerDocument, 'charsubs', []))
541                return t
542            if t.catcode not in textTokens:
543                if len(tokens) == 1:
544                    return tokens.pop()
545                t = self.ownerDocument.createDocumentFragment()
546                t.ownerDocument = self.ownerDocument
547                t.parentNode = None
548                t.extend(tokens)
549                t.normalize(getattr(self.ownerDocument, 'charsubs', []))
550                return t
551
552        return (''.join([x for x in tokens
553                          if getattr(x, 'catcode', Token.CC_OTHER)
554                             not in grouptokens])).strip()
555
556    def processIfContent(self, which, debug=False):
557        """
558        Process the requested portion of the `if' block
559
560        Required Arguments:
561        which -- the case to return.  If this is a boolean, a value of
562            `True' will return the first part of the `if' block.  If it
563            is `False', it will return the `else' portion.  If this is
564            an integer, the `case' matching this integer will be returned.
565
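        Example (a sketch, as called from an `if' macro's invoke method):
            tex.processIfContent(True)   # keep the true branch
            tex.processIfContent(2)      # keep the third case of an ifcase
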
566        """
567        # Since the true content always comes first, we need to set
568        # True to case 0 and False to case 1.
569        elsefound = False
570        if isinstance(which, bool):
571            if which: which = 0
572            else: which = 1
573
574        cases = [[]]
575        nesting = 0
576
577        for t in self.itertokens():
578            name = getattr(t, 'macroName', '') or ''
579            if name.startswith('if'):
580                cases[-1].append(t)
581                nesting += 1
582            elif name == 'fi':
583                if nesting > 1:
584                    cases[-1].append(t)
585                elif not nesting:
586                    break
587                nesting -= 1
588            elif not(nesting) and name == 'else':
589                cases.append([])
590                continue
591            elif not(nesting) and name == 'or':
592                cases.append([])
593                continue
594            else:
595                cases[-1].append(t)
596
597        # else case for ifs without elses
598        cases.append([])
599
600        # Push if-selected tokens back into tokenizer
601        self.pushTokens(cases[which])
602
603    def readArgument(self, *args, **kwargs):
604        """
605        Return an argument without the TeX source that created it
606
607        See Also:
608        self.readArgumentAndSource()
609
610        """
611        return self.readArgumentAndSource(*args, **kwargs)[0]
612
613    def readArgumentAndSource(self, spec=None, type=None, subtype=None,
614                    delim=',', expanded=False, default=None, parentNode=None,
615                    name=None, stripLeadingWhitespace=True):
616        """
617        Get an argument and the TeX source that created it
618
619        Optional Arguments:
620        spec -- string containing information about the type of
621            argument to get.  If it is 'None', the next token is
622            returned.  If it is a two-character string, a grouping
623            delimited by those two characters is returned (i.e. '[]').
624            If it is a single-character string, the stream is checked
625            to see if the next character is the one specified.
626            In all cases, if the specified argument is not found,
627            'None' is returned.
628        type -- data type to cast the argument to.  New types can be
629            added to the self.argtypes dictionary.  The key should
630            match this 'type' argument and the value should be a callable
631            object that takes a list of tokens as the first argument
632            and a list of unspecified keyword arguments (i.e. **kwargs)
633            for type specific information such as list delimiters.
634        subtype -- data type to use for elements of a list or dictionary
635        delim -- item delimiter for list and dictionary types
636        expanded -- boolean indicating whether the argument content
637            should be expanded or just returned as an unexpanded
638            text string
639        default -- value to return if the argument doesn't exist
640        parentNode -- the node that the argument belongs to
641        name -- the name of the argument being parsed
642        stripLeadingWhitespace -- if True, whitespace is skipped before
643            looking for the argument
644
645        Returns:
646        tuple where the first argument is:
647
648        None -- if the argument wasn't found
649        object of type `type` -- if `type` was specified
650        list of tokens -- for all other arguments
651
652        The second argument is a string containing the TeX source
653        for the argument.
654
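        Example (a sketch of typical use when parsing macro arguments):
            # read a {...} group and expand its contents
            arg, source = tex.readArgumentAndSource(expanded=True)
            # read an optional [...] argument cast to a dictionary
            opts, source = tex.readArgumentAndSource('[]', type=dict)
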
655        """
656        if stripLeadingWhitespace:
657            self.readOptionalSpaces()
658
659        # Disable expansion of parameters
660        ParameterCommand.disable()
661
662        if type in ['Dimen','Length','Dimension']:
663            n = self.readDimen()
664            ParameterCommand.enable()
665            return n, n.source
666
667        if type in ['MuDimen','MuLength']:
668            n = self.readMuDimen()
669            ParameterCommand.enable()
670            return n, n.source
671
672        if type in ['Glue','Skip']:
673            n = self.readGlue()
674            ParameterCommand.enable()
675            return n, n.source
676
677        if type in ['MuGlue','MuSkip']:
678            n = self.readMuGlue()
679            ParameterCommand.enable()
680            return n, n.source
681
682        if type in ['Number','Int','Integer']:
683            n = self.readNumber()
684            ParameterCommand.enable()
685            return n, n.source
686
687        if type in ['Token','Tok']:
688            for tok in self.itertokens():
689                ParameterCommand.enable()
690                return tok, tok.source
691
692        if type in ['XTok','XToken']:
693            self.ownerDocument.context.warnOnUnrecognized = False
694            for t in self.itertokens():
695                if t.catcode == Token.CC_BGROUP:
696                    self.pushToken(t)
697                    toks, source = self.readToken(True)
698                    if len(toks) == 1:
699                        ParameterCommand.enable()
700                        return toks[0], toks[0].source
701                    ParameterCommand.enable()
702                    return toks, source
703                else:
704                    toks = self.expandTokens([t], parentNode=parentNode)
705                    if len(toks) == 1:
706                        ParameterCommand.enable()
707                        return toks[0], toks[0].source
708                    ParameterCommand.enable()
709                    return toks, self.source(toks)
710
711        # Definition argument string
712        if type in ['Args']:
713            args = []
714            for t in self.itertokens():
715                if t.catcode == Token.CC_BGROUP:
716                    self.pushToken(t)
717                    break
718                else:
719                    args.append(t)
720            else: pass
721            ParameterCommand.enable()
722            return args, self.source(args)
723
724        if type in ['any']:
725            toks = []
726            for t in self.itertokens():
727                if t is None or t == '':
728                    continue
729                if t.catcode == Token.CC_SPACE:
730                    break
731                toks.append(t)
732            return self.expandTokens(toks, parentNode=parentNode), self.source(toks)
733
734        if type in ['cs']:
735            expanded = False
736
737        priorcodes = {}
738
739        try:
740            # Set catcodes for this argument type
741            try:
742                if isinstance(self.argtypes[type], (list,tuple)):
743                    for key, value in list(self.argtypes[type][1].items()):
744                        priorcodes[key] = self.ownerDocument.context.whichCode(key)
745                        self.ownerDocument.context.catcode(key, value)
746            except KeyError:
747                pass
748
749            # Get a TeX token (i.e. {...})
750            if spec is None:
751                toks, source = self.readToken(expanded, parentNode=parentNode)
752
753            # Get a single character argument
754            elif len(spec) == 1:
755                toks, source = self.readCharacter(spec)
756
757            # Get an argument grouped by the given characters (e.g. [...], (...))
758            elif len(spec) == 2:
759                toks, source = self.readGrouping(spec, expanded, parentNode=parentNode)
760
761            # This isn't a correct value
762            else:
763                raise ValueError('Unrecognized specifier "%s"' % spec)
764
765        except Exception as msg:
766            log.error('Error while reading argument "%s" of %s%s (%s)' % \
767                          (name, parentNode.nodeName, self.lineInfo, msg))
768            raise
769
770        # Set catcodes back to original values
771        for key, value in list(priorcodes.items()):
772            self.ownerDocument.context.catcode(key, value)
773
774        if toks is None:
775            ParameterCommand.enable()
776            return default, ''
777        res = self.cast(toks, type, subtype, delim, parentNode, name)
778
779        # Normalize any document fragments
780        if expanded and \
781           getattr(res,'nodeType',None) == Macro.DOCUMENT_FRAGMENT_NODE:
782            res.normalize(getattr(self.ownerDocument, 'charsubs', []))
783
784        # Re-enable Parameters
785        ParameterCommand.enable()
786
787        if False and parentNode is not None:
788            log.warning('%s %s: %s', parentNode.nodeName, name, source)
789            log.warning('categories: %s', self.ownerDocument.context.categories)
790            log.warning('stack: %s', self.ownerDocument.context.top)
791
792        return res, source
793
794    def readToken(self, expanded=False, parentNode=None):
795        """
796        Read a token or token group
797
798        Returns:
799        two element tuple containing the parsed tokens and the
800        TeX code that they came from
801
802        """
803        tokens = self.itertokens()
804        isgroup = False
805        for t in tokens:
806            toks = []
807            source = [t]
808            # A { ... } grouping was found
809            if t.catcode == Token.CC_BGROUP:
810                isgroup = True
811                level = 1
812                for t in tokens:
813                    source.append(t)
814                    if t.catcode == Token.CC_BGROUP:
815                        toks.append(t)
816                        level += 1
817                    elif t.catcode == Token.CC_EGROUP:
818                        level -= 1
819                        if level == 0:
820                            break
821                        toks.append(t)
822                    else:
823                        toks.append(t)
824            # A math token was found (i.e., $ ... $)
825            elif t.catcode == Token.CC_MATHSHIFT or isinstance(t, MathShift):
826                toks.append(t)
827                for t in tokens:
828                    source.append(t)
829                    toks.append(t)
830                    if t.catcode == Token.CC_MATHSHIFT or isinstance(t, MathShift):
831                        break
832            else:
833                toks.append(t)
834
835            # Expand macros and get the argument source string
836            if expanded:
837                toks = self.expandTokens(toks, parentNode=parentNode)
838                if isgroup:
839                    s = self.source(toks)
840                    source = '%s%s%s' % (source[0].source, s,
841                                          source[-1].source)
842                else:
843                    source = self.source(toks)
844            else:
845                source = self.source(source)
846
847            return toks, source
848
849        return None, ''
850
851    def readCharacter(self, char):
852        """
853        Read a character from the stream
854
855        Required Arguments:
856        char -- the character that is expected
857
858        Returns:
859        two element tuple containing the parsed token and the
860        TeX code that it came from
861
862        """
863        for t in self.itertokens():
864            if t == char:
865                return t, self.source([t])
866            else:
867                self.pushToken(t)
868                break
869        return None, ''
870
871    def readGrouping(self, chars, expanded=False, parentNode=None):
872        """
873        Read a group delimited by the given characters
874
875        Keyword Arguments:
876        chars -- the two characters that begin and end the group
877
878        Returns:
879        two element tuple containing the parsed tokens and the
880        TeX code that they came from
881
882        """
883        tokens = self.itertokens()
884        begin, end = Other(chars[0]), Other(chars[1])
885        source = []
886        for t in tokens:
887            toks = []
888            source = [t]
889            # A [ ... ], ( ... ), etc. grouping was found
890            if t.catcode != Token.CC_ESCAPE and \
891               (t == begin or str(t) == str(begin)):
892                level = 1
893                for t in tokens:
894                    source.append(t)
895                    if t.catcode != Token.CC_ESCAPE and \
896                       (t == begin or str(t) == str(begin)):
897                        toks.append(t)
898                        level += 1
899                    elif t.catcode != Token.CC_ESCAPE and \
900                         (t == end or str(t) == str(end)):
901                        level -= 1
902                        if level == 0:
903                            break
904                        toks.append(t)
905                    else:
906                        toks.append(t)
907            else:
908                self.pushToken(t)
909                break
910            if expanded:
911                toks = self.expandTokens(toks, parentNode=parentNode)
912                source = begin + self.source(toks) + end
913            else:
914                source = self.source(source)
915            return toks, source
916        return None, ''
917
918    def readInternalType(self, tokens, method):
919        """
920        Read an internal type from the given tokens
921
922        Required Arguments:
923        tokens -- list of tokens that contain the internal value
924        method -- reference to the method to parse the tokens
925
926        Returns:
927        instance of the TeX type
928
929        """
930        # Throw a \relax in here to keep the token after the
931        # argument from being expanded when parsing the internal type
932        self.pushToken(EscapeSequence('relax'))
933        self.pushTokens(tokens)
934
935        # Call the appropriate parsing method for this type
936        result = method()
937
938        # Get rid of the \relax token inserted above
939        for t in self.itertokens():
940            if (t.nodeType == Token.ELEMENT_NODE and t.nodeName == 'relax') \
941               or t.macroName == 'relax':
942                break
943
944        return result
945
946    def cast(self, tokens, dtype, subtype=None, delim=',',
947                   parentNode=None, name=None):
948        """
949        Cast the tokens to the appropriate type
950
951        This method is used to convert tokens into Python objects.
952        This happens when the user has specified that a macro argument
953        should be a dictionary (e.g. foo:dict),
954        a list (e.g. foo:list), etc.
955
956        Required Arguments:
957        tokens -- list of raw, unflattened and unnormalized tokens
958        dtype -- reference to the requested datatype
959
960        Optional Arguments:
961        subtype -- data type for elements of a list or dictionary
962        delim -- delimiter character for list and dictionary types
963
964        Returns:
965        object of the specified type
966
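        Example (a sketch; `tokens' holds raw argument tokens):
            items = tex.cast(tokens, list, delim=',')
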
967        """
968        argtypes = {}
969        for key, t in list(self.argtypes.items()):
970            if isinstance(t, tuple):
971                argtypes[key] = t[0]
972            else:
973                argtypes[key] = t
974
975        # No type specified
976        if dtype is None:
977            pass
978
979        # Could not find specified type
980        elif dtype not in list(argtypes.keys()):
981            log.warning('Could not find datatype "%s"' % dtype)
982
983        # Casting to specified type
984        else:
985            tokens = argtypes[dtype](tokens, subtype=subtype,
986                     delim=delim, parentNode=parentNode, name=name)
987
988        # Set parent node as needed
989        if getattr(tokens,'nodeType',None) == Macro.DOCUMENT_FRAGMENT_NODE:
990            tokens.parentNode = parentNode
991
992        return tokens
993
994    def castNone(self, tokens, **kwargs):
995        return tokens
996
997    def castControlSequence(self, tokens, **kwargs):
998        """
999        Limit the argument to a single non-space token
1000
1001        Required Arguments:
1002        tokens -- list of tokens to cast
1003
1004        See Also:
1005        self.readArgument()
1006        self.cast()
1007
1008        """
1009
1010        return [x for x in tokens if x.catcode == Token.CC_ESCAPE].pop(0)
1011
1012    def castString(self, tokens, type=str, **kwargs):
1013        """
1014        Join the tokens into a string
1015
1016        Required Arguments:
1017        tokens -- list of tokens to cast
1018
1019        Keyword Arguments:
1020        type -- the string class to use for the returned object
1021
1022        Returns:
1023        string
1024
1025        See Also:
1026        self.readArgument()
1027        self.cast()
1028
1029        """
1030        return type(self.normalize(tokens))
1031
1032    def castLabel(self, tokens, **kwargs):
1033        """
1034        Join the tokens into a string and set a label in the context
1035
1036        Required Arguments:
1037        tokens -- list of tokens to cast
1038
1039        Returns:
1040        string
1041
1042        See Also:
1043        self.readArgument()
1044        self.cast()
1045        self.castRef()
1046
1047        """
1048        label = self.castString(tokens, **kwargs)
1049        self.ownerDocument.context.label(label)
1050        return label
1051
1052    def castRef(self, tokens, **kwargs):
1053        """
1054        Join the tokens into a string and set a reference in the context
1055
1056        Required Arguments:
1057        tokens -- list of tokens to cast
1058
1059        Returns:
1060        string
1061
1062        See Also:
1063        self.readArgument()
1064        self.cast()
1065        self.castLabel()
1066
1067        """
1068        ref = self.castString(tokens, **kwargs)
1069        self.ownerDocument.context.ref(kwargs['parentNode'], kwargs['name'], ref)
1070        return ref
1071
1072    def castNumber(self, tokens, **kwargs):
1073        """
1074        Join the tokens into a string and turn the result into an integer
1075
1076        Required Arguments:
1077        tokens -- list of tokens to cast
1078
1079        Keyword Arguments:
1080        type -- the integer class to use for the returned object
1081
1082        Returns:
1083        integer
1084
1085        See Also:
1086        self.readArgument()
1087        self.cast()
1088
1089        """
1090#       try: return number(self.castString(tokens, **kwargs))
1091#       except: return number(0)
1092        return self.readInternalType(tokens, self.readNumber)
1093
1094    def castDecimal(self, tokens, **kwargs):
1095        """
1096        Join the tokens into a string and turn the result into a float
1097
1098        Required Arguments:
1099        tokens -- list of tokens to cast
1100
1101        Keyword Arguments:
1102        type -- the float class to use for the returned object
1103
1104        Returns:
1105        float
1106
1107        See Also:
1108        self.readArgument()
1109        self.cast()
1110
1111        """
1112#       try: return self.castString(tokens, **kwargs)
1113#       except: return decimal(0)
1114        return self.readInternalType(tokens, self.readDecimal)
1115
1116    def castDimen(self, tokens, **kwargs):
1117        """
        Join the tokens into a string and convert the result into a `dimen`
1119
1120        Required Arguments:
1121        tokens -- list of tokens to cast
1122
1123        Returns:
1124        `dimen` instance
1125
1126        See Also:
1127        self.readArgument()
1128        self.cast()
1129
1130        """
1131#       try: return dimen(self.castString(tokens, **kwargs))
1132#       except: return dimen(0)
1133        return self.readInternalType(tokens, self.readDimen)
1134
1135    def castMuDimen(self, tokens, **kwargs):
1136        """
        Join the tokens into a string and convert the result into a `MuDimen`
1138
1139        Required Arguments:
1140        tokens -- list of tokens to cast
1141
1142        Returns:
1143        `MuDimen` instance
1144
1145        See Also:
1146        self.readArgument()
1147        self.cast()
1148
1149        """
1150#       try: return mudimen(self.castString(tokens, **kwargs))
1151#       except: return mudimen(0)
1152        return self.readInternalType(tokens, self.readMuDimen)
1153
1154    def castGlue(self, tokens, **kwargs):
1155        """
        Join the tokens into a string and convert the result into a `Glue`
1157
1158        Required Arguments:
1159        tokens -- list of tokens to cast
1160
1161        Returns:
1162        `Glue` instance
1163
1164        See Also:
1165        self.readArgument()
1166        self.cast()
1167
1168        """
1169#       try: return glue(self.castString(tokens, **kwargs))
1170#       except: return glue(0)
1171        return self.readInternalType(tokens, self.readGlue)
1172
1173    def castMuGlue(self, tokens, **kwargs):
1174        """
        Join the tokens into a string and convert the result into a `MuGlue`
1176
1177        Required Arguments:
1178        tokens -- list of tokens to cast
1179
1180        Returns:
1181        `MuGlue` instance
1182
1183        See Also:
1184        self.readArgument()
1185        self.cast()
1186
1187        """
1188#       try: return muglue(self.castString(tokens, **kwargs))
1189#       except: return muglue(0)
1190        return self.readInternalType(tokens, self.readMuGlue)
1191
1192    def castList(self, tokens, type=list, **kwargs):
1193        """
1194        Parse items delimited by the given delimiter into a list
1195
1196        Required Arguments:
1197        tokens -- TeXFragment of tokens to cast
1198
1199        Keyword Arguments:
1200        type -- the list class to use for the returned object
1201        delim -- the delimiter that separates each element of the list.
1202            The default delimiter is ','.
1203
1204        Returns:
1205        list
1206
1207        See Also:
1208        self.readArgument()
1209        self.cast()
1210
1211        """
1212        delim = kwargs.get('delim')
1213        if delim is None:
1214            delim = ','
1215        subtype = kwargs.get('subtype')
1216        listarg = [[]]
1217        while tokens:
1218            current = tokens.pop(0)
1219
1220            # Item delimiter
1221            if current == delim:
1222                listarg.append([])
1223
1224            # Found grouping
1225            elif current.catcode == Token.CC_BGROUP:
1226                level = 1
1227                listarg[-1].append(current)
1228                while tokens:
1229                    current = tokens.pop(0)
1230                    if current.catcode == Token.CC_BGROUP:
1231                        level += 1
1232                    elif current.catcode == Token.CC_EGROUP:
1233                        level -= 1
1234                        if not level:
1235                            break
1236                    listarg[-1].append(current)
1237                listarg[-1].append(current)
1238
1239            else:
1240                listarg[-1].append(current)
1241
1242        return type([self.normalize(self.cast(x, subtype)) for x in listarg])
1243
1244    def castDictionary(self, tokens, type=dict, **kwargs):
1245        """
1246        Parse key/value pairs into a dictionary
1247
1248        Required Arguments:
1249        tokens -- TeXFragment of tokens to cast
1250
1251        Keyword Arguments:
1252        type -- the dictionary class to use for the returned object
1253        delim -- the delimiter that separates each element of the list.
1254            The default delimiter is ','.
1255
1256        Returns:
1257        dictionary
1258
1259        See Also:
1260        self.readArgument()
1261        self.cast()
1262
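        Example (a sketch): tokens spelling `width=5,height={10},draft'
        would produce {'width': '5', 'height': '10', 'draft': True}
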
1263        """
1264        delim = kwargs.get('delim')
1265        if delim is None:
1266            delim = ','
1267        subtype = kwargs.get('subtype')
1268        dictarg = type()
1269        currentkey = []
1270        currentvalue = None
1271        while tokens:
1272            current = tokens.pop(0)
1273
1274            if current.nodeType == Macro.ELEMENT_NODE:
1275                currentvalue.append(current)
1276                continue
1277
1278            # Found grouping
1279            elif current.catcode == Token.CC_BGROUP:
1280                level = 1
1281                currentvalue.append(current)
1282                while tokens:
1283                    current = tokens.pop(0)
1284                    if current.catcode == Token.CC_BGROUP:
1285                        level += 1
1286                    elif current.catcode == Token.CC_EGROUP:
1287                        level -= 1
1288                        if not level:
1289                            break
1290                    currentvalue.append(current)
1291                currentvalue.append(current)
1292                continue
1293
1294            # Found end-of-key delimiter
1295            if current == '=':
1296                currentvalue = []
1297
1298            # Found end-of-value delimiter
1299            elif current == delim:
1300                # Handle this later
1301                pass
1302
1303            # Extend key
1304            elif currentvalue is None:
1305                currentkey.append(current)
1306
1307            # Extend value
1308            else:
1309                currentvalue.append(current)
1310
1311            # Found end-of-value delimiter
1312            if current == delim or not tokens:
1313                currentkey = self.normalize(currentkey)
1314                currentvalue = self.normalize(self.cast(currentvalue, subtype))
1315                if currentvalue is None:
1316                    currentvalue = True
1317                dictarg[currentkey] = currentvalue
1318                currentkey = []
1319                currentvalue = None
1320
1321        if currentkey:
1322            currentkey = self.normalize(currentkey)
1323            currentvalue = self.normalize(self.cast(currentvalue, subtype))
1324            if currentvalue is None:
1325                currentvalue = True
1326            dictarg[currentkey] = currentvalue
1327
1328        return dictarg
1329
1330    def kpsewhich(self, name):
1331        """
1332        Locate the given file using kpsewhich
1333
1334        Required Arguments:
1335        name -- name of file to find
1336
1337        Returns:
1338        full path to file -- if it is found
1339
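        Example (a sketch; assumes a TeX distribution providing kpsewhich):
            path = tex.kpsewhich('article.cls')
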
1340        """
1341        # When, for example, ``\Input{name}`` is encountered, we should look in
1342        # the directory containing the file being processed. So the following
1343        # code adds the directory to the start of $TEXINPUTS.
1344        TEXINPUTS = None
1345        try:
1346            srcDir = os.path.dirname(self.filename)
1347        except AttributeError:
1348            # I think this happens only for the command line file.
1349            pass
1350        else:
1351            TEXINPUTS = os.environ.get("TEXINPUTS",'')
1352            os.environ["TEXINPUTS"] = "%s%s%s%s" % (srcDir, os.path.pathsep, TEXINPUTS, os.path.pathsep)
1353
1354        try:
1355            program = self.ownerDocument.config['general']['kpsewhich']
1356
1357            kwargs = {'stdout':subprocess.PIPE}
1358            if sys.platform.lower().startswith('win'):
1359                kwargs['shell'] = True
1360
1361            output = subprocess.Popen([program, name], **kwargs).communicate()[0].strip()
1362            output = output.decode('utf-8')
1363            if output:
1364                return output
1365
1366        except:
1367            fullname = ''
1368            paths = os.environ.get("TEXINPUTS", '.').split(os.path.pathsep)
1369            for path in [x for x in paths if x]:
1370                if name in os.listdir(path):
1371                    fullname = os.path.join(path,name)
1372                    break
1373            if fullname:
1374                return fullname
1375
1376        # Undo any mods to $TEXINPUTS.
1377        if TEXINPUTS:
1378            os.environ["TEXINPUTS"] = TEXINPUTS
1379
1380        raise OSError('Could not find any file named: %s' % name)
1381
1382#
1383# Parsing helper methods for parsing numbers, spaces, dimens, etc.
1384#
1385
1386    def readOptionalSpaces(self):
1387        """ Remove all whitespace """
1388        tokens = []
1389        for t in self.itertokens():
1390            if t.nodeType == t.ELEMENT_NODE:
1391                self.pushToken(t)
1392                break
1393            elif t is None or t == '':
1394                continue
1395            elif t.catcode != Token.CC_SPACE:
1396                self.pushToken(t)
1397                break
1398            tokens.append(t)
1399        return tokens
1400
1401    def readKeyword(self, words, optspace=True):
1402        """
1403        Read keyword from the stream
1404
1405        Required Arguments:
1406        words -- list of possible keywords to get from the stream
1407
1408        Keyword Arguments:
1409        optspace -- boolean indicating if it should eat an optional
1410            space token after a matched keyword
1411
1412        Returns:
1413        matched keyword -- if one is found
1414        `None' -- if none of the keywords are found
1415
1416        """
1417        self.readOptionalSpaces()
1418        for word in words:
1419            matched = []
1420            letters = list(word.upper())
1421            for t in self.itertokens():
1422                if t.nodeType == Token.ELEMENT_NODE:
1423                    break
1424                matched.append(t)
1425                if t.upper() == letters[0]:
1426                    letters.pop(0)
1427                    if not letters:
1428                        if optspace:
1429                            self.readOneOptionalSpace()
1430                        return word
1431                else:
1432                    break
1433            self.pushTokens(matched)
1434        return None
1435
1436    def readDecimal(self):
1437        """ Read a decimal number from the stream """
1438        sign = self.readOptionalSigns()
1439        for t in self:
1440            if t.nodeType == Token.ELEMENT_NODE:
1441                self.pushToken(t)
1442                break
1443            if t in string.digits:
1444                num = t + self.readSequence(string.digits, False)
1445                for t in self:
1446                    if t.nodeType == Token.ELEMENT_NODE:
1447                        self.pushToken(t)
1448                        return sign * float(num)
1449                    elif t in '.,':
1450                        num += '.' + self.readSequence(string.digits, default='0')
1451                    else:
1452                        self.pushToken(t)
1453                        return sign * float(num)
1454                    break
1455                return sign * float(num)
1456            if t in '.,':
1457                return sign * float('.' + self.readSequence(string.digits, default='0'))
1458            if t in '\'"`':
1459                self.pushToken(t)
1460                return sign * self.readInteger()
1461            break
1462        log.warning('Missing decimal%s, treating as `0`.', self.lineInfo)
1463        return float(0)
1464
1465    def readDimen(self, units=dimen.units):
1466        """
1467        Read a dimension from the stream
1468
1469        Keyword Arguments:
1470        units -- list of acceptable units of measure
1471
1472        Returns:
1473        `dimen' instance
1474
1475        """
1476        ParameterCommand.disable()
1477        sign = self.readOptionalSigns()
1478        for t in self:
1479            if t.nodeType == Macro.ELEMENT_NODE and \
1480               isinstance(t, ParameterCommand):
1481                ParameterCommand.enable()
1482                return dimen(sign * dimen(t))
1483            self.pushToken(t)
1484            break
1485        num = dimen(sign * self.readDecimal() * self.readUnitOfMeasure(units=units))
1486        ParameterCommand.enable()
1487        return num
1488
1489    def readMuDimen(self):
1490        """ Read a mudimen from the stream """
1491        return mudimen(self.readDimen(units=mudimen.units))
1492
1493    def readUnitOfMeasure(self, units):
1494        """
1495        Read a unit of measure from the stream
1496
1497        Required Arguments:
1498        units -- list of acceptable units of measure
1499
1500        Returns:
1501        `dimen' instance
1502
1503        """
1504        self.readOptionalSpaces()
1505        ParameterCommand.disable()
1506        # internal unit
1507        for t in self:
1508            if t.nodeType == Macro.ELEMENT_NODE and \
1509               isinstance(t, ParameterCommand):
1510                ParameterCommand.enable()
1511                return dimen(t)
1512            self.pushToken(t)
1513            break
1514        true = self.readKeyword(['true'])
1515        unit = self.readKeyword(units)
1516        if unit is None:
1517            log.warning('Missing unit (expecting %s)%s, treating as `%s`',
1518                        ', '.join(units), self.lineInfo, units[0])
1519            unit = units[0]
1520        ParameterCommand.enable()
1521        return dimen('1%s' % unit)
1522
1523    def readOptionalSigns(self):
1524        """
1525        Read optional + and - signs
1526
1527        Returns:
1528        +1 or -1
1529
1530        """
1531        sign = 1
1532        self.readOptionalSpaces()
1533        for t in self:
1534            if t.nodeType == Token.ELEMENT_NODE:
1535                self.pushToken(t)
1536                break
1537            elif t == '+':
1538                pass
1539            elif t == '-':
1540                sign = -sign
1541            elif t is None or t == '' or t.catcode == Token.CC_SPACE:
1542                continue
1543            else:
1544                self.pushToken(t)
1545                break
1546        return sign
1547
1548    def readOneOptionalSpace(self):
1549        """ Read one optional space from the stream """
1550        for t in self.itertokens():
1551            if t.nodeType == Token.ELEMENT_NODE:
1552                self.pushToken(t)
1553                return None
1554            if t is None or t == '':
1555                continue
1556            if t.catcode == Token.CC_SPACE:
1557                return t
1558            self.pushToken(t)
1559            return None
1560
1561    def readSequence(self, chars, optspace=True, default=''):
1562        """
1563        Read a sequence of characters from a given set
1564
1565        Required Arguments:
1566        chars -- sequence of characters that should be accepted
1567
1568        Keyword Arguments:
1569        optspace -- boolean indicating if an optional space should
1570            be absorbed after the sequence of characters
1571        default -- string to return if none of the characters in
1572            the given set are found
1573
1574        Returns:
1575        string of characters matching those in the sequence `chars'
1576        or `default' if none are found
1577
1578        """
1579        output = []
1580        for t in self:
1581            if t.nodeType == Macro.ELEMENT_NODE:
1582                self.pushToken(t)
1583                break
1584            if t not in chars:
1585                if optspace and t.catcode == Token.CC_SPACE:
1586                    pass
1587                else:
1588                    self.pushToken(t)
1589                break
1590            output.append(t)
1591        if not output:
1592            return default
1593        return ''.join(output)
1594
1595    def readInteger(self, optspace=True):
1596        """
1597        Read an integer from the stream
1598
1599        Returns:
1600        `number` instance
1601
1602        """
1603        ParameterCommand.disable()
1604        num = None
1605        sign = self.readOptionalSigns()
1606
1607        for t in self:
1608            # internal/coerced integers
1609            if t.nodeType == Macro.ELEMENT_NODE:
1610                if isinstance(t, ParameterCommand):
1611                    num = number(sign * number(t))
1612                else:
1613                    self.pushToken(t)
1614                    break
1615            # integer constant
1616            elif t in string.digits:
1617                num = number(sign * int(t + self.readSequence(string.digits,
1618                                                              optspace=optspace)))
1619                for t in self:
1620                    if t.nodeType == Macro.ELEMENT_NODE and \
1621                       isinstance(t, ParameterCommand):
1622                        num = number(num * number(t))
1623                    else:
1624                        self.pushToken(t)
1625                    break
1626            # octal constant
1627            elif t == "'":
1628                num = number(sign * int('0' + self.readSequence(string.octdigits,
1629                                                   default='0', optspace=optspace), 8))
1630            # hex constant
1631            elif t == '"':
1632                num = number(sign * int('0x' + self.readSequence(string.hexdigits,
1633                                               default='0', optspace=optspace), 16))
1634            # character token
1635            elif t == '`':
1636                for t in self.itertokens():
1637                    num = number(sign * ord(t))
1638                    break
1639            break
1640        ParameterCommand.enable()
1641        if num is not None:
1642            return num
1643        log.warning('Missing number%s, treating as `0`. (%s)', self.lineInfo, t)
1644        return number(0)
1645
1646    readNumber = readInteger
1647
1648    def readGlue(self):
1649        """ Read a glue parameter from the stream """
1650        ParameterCommand.disable()
1651        sign = self.readOptionalSigns()
1652        # internal/coerced glue
1653        for t in self:
1654            if t.nodeType == Macro.ELEMENT_NODE and \
1655               isinstance(t, ParameterCommand):
1656                ParameterCommand.enable()
1657                return glue(sign * glue(t))
1658            self.pushToken(t)
1659            break
1660        dim = self.readDimen()
1661        stretch = self.readStretch()
1662        shrink = self.readShrink()
1663        ParameterCommand.enable()
1664        return glue(sign*dim, stretch, shrink)
1665
1666    def readStretch(self):
1667        """ Read a stretch parameter from the stream """
1668        if self.readKeyword(['plus']):
1669            return self.readDimen(units=dimen.units+['filll','fill','fil'])
1670        return None
1671
1672    def readShrink(self):
1673        """ Read a shrink parameter from the stream """
1674        if self.readKeyword(['minus']):
1675            return self.readDimen(units=dimen.units+['filll','fill','fil'])
1676        return None
1677
1678    def readMuGlue(self):
1679        """ Read a muglue parameter from the stream """
1680        ParameterCommand.disable()
1681        sign = self.readOptionalSigns()
1682        # internal/coerced muglue
1683        for t in self:
1684            if t.nodeType == Macro.ELEMENT_NODE and \
1685               isinstance(t, ParameterCommand):
1686                ParameterCommand.enable()
1687                return muglue(sign * muglue(t))
1688            self.pushToken(t)
1689            break
1690        dim = self.readMuDimen()
1691        stretch = self.readMuStretch()
1692        shrink = self.readMuShrink()
1693        ParameterCommand.enable()
1694        return muglue(sign*dim, stretch, shrink)
1695
1696    def readMuStretch(self):
1697        """ Read a mustretch parameter from the stream """
1698        if self.readKeyword(['plus']):
1699            return self.readDimen(units=mudimen.units+['filll','fill','fil'])
1700        return None
1701
1702    def readMuShrink(self):
1703        """ Read a mushrink parameter from the stream """
1704        if self.readKeyword(['minus']):
1705            return self.readDimen(units=mudimen.units+['filll','fill','fil'])
1706        return None
1707
1708    def loadAuxiliaryFile(self):
1709        """ Read in an auxiliary file (only once) """
1710        if self.jobname in self.auxFiles:
1711            return
1712        self.auxFiles.append(self.jobname)
1713        warn = self.ownerDocument.context.warnOnUnrecognized
1714        try:
1715            f = self.kpsewhich(self.jobname+'.aux')
1716            self.ownerDocument.context.warnOnUnrecognized = False
1717            dummy = plasTeX.Command()
1718            self.pushToken(dummy)
1719            self.input(open(f))
1720            for item in self:
1721                if item is dummy:
1722                    break
1723        except OSError as msg:
1724            log.warning(msg)
1725        self.ownerDocument.context.warnOnUnrecognized = warn
1726
1727#   @property
1728#   def jobname(self):
1729#       """ Return the basename of the main input file """
1730#       print self.inputs
1731#       return os.path.basename(os.path.splitext(self.inputs[0][0].filename)[0])
1732
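
if __name__ == '__main__':
    # Minimal usage sketch (not part of the public plasTeX API): expand the
    # file named on the command line and print each token, mirroring the
    # example in the module docstring.  Assumes the file can be located
    # (e.g. via kpsewhich) and that the default TeXDocument configuration
    # is sufficient.
    if len(sys.argv) > 1:
        for tok in TeX(myfile=sys.argv[1]):
            print(tok)
    else:
        print('usage: %s <file.tex>' % sys.argv[0])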