# -----------------------------------------------------------------------------
# ply: lex.py
#
# Copyright (C) 2001-2015,
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# -----------------------------------------------------------------------------

__version__    = '3.8'
__tabversion__ = '3.8'

import re
import sys
import types
import copy
import os
import inspect

# This tuple contains known string types
try:
    # Python 2.6
    StringTypes = (types.StringType, types.UnicodeType)
except AttributeError:
    # Python 3.0
    StringTypes = (str, bytes)

# This regular expression is used to match valid token names
_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')

# Exception raised when an invalid token is encountered and no default error
# handler is defined.
class LexError(Exception):
    def __init__(self, message, s):
        self.args = (message,)
        self.text = s


# Token class.  This class is used to represent the tokens produced.
class LexToken(object):
    def __str__(self):
        return 'LexToken(%s,%r,%d,%d)' % (self.type, self.value, self.lineno, self.lexpos)

    def __repr__(self):
        return str(self)


# This object is a stand-in for a logging object created by the
# logging module.

class PlyLogger(object):
    def __init__(self, f):
        self.f = f

    def critical(self, msg, *args, **kwargs):
        self.f.write((msg % args) + '\n')

    def warning(self, msg, *args, **kwargs):
        self.f.write('WARNING: ' + (msg % args) + '\n')

    def error(self, msg, *args, **kwargs):
        self.f.write('ERROR: ' + (msg % args) + '\n')

    info = critical
    debug = critical


# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
    def __getattribute__(self, name):
        return self

    def __call__(self, *args, **kwargs):
        return self


# -----------------------------------------------------------------------------
#                        === Lexing Engine ===
#
# The following Lexer class implements the lexer runtime.  There are only
# a few public methods and attributes:
#
#    input()          -  Store a new string in the lexer
#    token()          -  Get the next token
#    clone()          -  Clone the lexer
#
#    lineno           -  Current line number
#    lexpos           -  Current position in the input string
# -----------------------------------------------------------------------------
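
# Example usage (an illustrative sketch; assumes a lexer object already built
# by lex() from a module defining `tokens` and t_* rules as described below):
#
#     lexer.input('x = 3 + 42')
#     while True:
#         tok = lexer.token()
#         if not tok:
#             break
#         print(tok.type, tok.value, tok.lineno, tok.lexpos)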

class Lexer:
    def __init__(self):
        self.lexre = None             # Master regular expression. This is a list of
                                      # tuples (re, findex) where re is a compiled
                                      # regular expression and findex is a list
                                      # mapping regex group numbers to rules
        self.lexretext = None         # Current regular expression strings
        self.lexstatere = {}          # Dictionary mapping lexer states to master regexs
        self.lexstateretext = {}      # Dictionary mapping lexer states to regex strings
        self.lexstaterenames = {}     # Dictionary mapping lexer states to symbol names
        self.lexstate = 'INITIAL'     # Current lexer state
        self.lexstatestack = []       # Stack of lexer states
        self.lexstateinfo = None      # State information
        self.lexstateignore = {}      # Dictionary of ignored characters for each state
        self.lexstateerrorf = {}      # Dictionary of error functions for each state
        self.lexstateeoff = {}        # Dictionary of eof functions for each state
        self.lexreflags = 0           # Optional re compile flags
        self.lexdata = None           # Actual input data (as a string)
        self.lexpos = 0               # Current position in input text
        self.lexlen = 0               # Length of the input text
        self.lexerrorf = None         # Error rule (if any)
        self.lexeoff = None           # EOF rule (if any)
        self.lextokens = None         # Set of valid tokens
        self.lexignore = ''           # Ignored characters
        self.lexliterals = ''         # Literal characters that can be passed through
        self.lexmodule = None         # Module
        self.lineno = 1               # Current line number
        self.lexoptimize = False      # Optimized mode

    def clone(self, object=None):
        c = copy.copy(self)

        # If the object parameter has been supplied, it means we are attaching the
        # lexer to a new object.  In this case, we have to rebind all methods in
        # the lexstatere and lexstateerrorf tables.

        if object:
            newtab = {}
            for key, ritem in self.lexstatere.items():
                newre = []
                for cre, findex in ritem:
                    newfindex = []
                    for f in findex:
                        if not f or not f[0]:
                            newfindex.append(f)
                            continue
                        newfindex.append((getattr(object, f[0].__name__), f[1]))
                    newre.append((cre, newfindex))
                newtab[key] = newre
            c.lexstatere = newtab
            c.lexstateerrorf = {}
            for key, ef in self.lexstateerrorf.items():
                c.lexstateerrorf[key] = getattr(object, ef.__name__)
            c.lexmodule = object
        return c
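
    # Example (an illustrative sketch): clone() is handy for recursive lexing,
    # e.g. scanning an included file without disturbing the current scan.  The
    # object argument is only needed when the rules are methods of a class:
    #
    #     sublexer = lexer.clone()
    #     sublexer.input(included_text)   # hypothetical string to scan
    #     tok = sublexer.token()          # original lexer position is untouched
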
    # ------------------------------------------------------------
    # writetab() - Write lexer information to a table file
    # ------------------------------------------------------------
    def writetab(self, lextab, outputdir=''):
        if isinstance(lextab, types.ModuleType):
            raise IOError("Won't overwrite existing lextab module")
        basetabmodule = lextab.split('.')[-1]
        filename = os.path.join(outputdir, basetabmodule) + '.py'
        with open(filename, 'w') as tf:
            tf.write('# %s.py. This file automatically created by PLY (version %s). Don\'t edit!\n' % (basetabmodule, __version__))
            tf.write('_tabversion   = %s\n' % repr(__tabversion__))
            tf.write('_lextokens    = %s\n' % repr(self.lextokens))
            tf.write('_lexreflags   = %s\n' % repr(self.lexreflags))
            tf.write('_lexliterals  = %s\n' % repr(self.lexliterals))
            tf.write('_lexstateinfo = %s\n' % repr(self.lexstateinfo))

            # Rewrite the lexstatere table, replacing function objects with function names
            tabre = {}
            for statename, lre in self.lexstatere.items():
                titem = []
                for (pat, func), retext, renames in zip(lre, self.lexstateretext[statename], self.lexstaterenames[statename]):
                    titem.append((retext, _funcs_to_names(func, renames)))
                tabre[statename] = titem

            tf.write('_lexstatere   = %s\n' % repr(tabre))
            tf.write('_lexstateignore = %s\n' % repr(self.lexstateignore))

            taberr = {}
            for statename, ef in self.lexstateerrorf.items():
                taberr[statename] = ef.__name__ if ef else None
            tf.write('_lexstateerrorf = %s\n' % repr(taberr))

            tabeof = {}
            for statename, ef in self.lexstateeoff.items():
                tabeof[statename] = ef.__name__ if ef else None
            tf.write('_lexstateeoff = %s\n' % repr(tabeof))

    # ------------------------------------------------------------
    # readtab() - Read lexer information from a tab file
    # ------------------------------------------------------------
    def readtab(self, tabfile, fdict):
        if isinstance(tabfile, types.ModuleType):
            lextab = tabfile
        else:
            exec('import %s' % tabfile)
            lextab = sys.modules[tabfile]

        if getattr(lextab, '_tabversion', '0.0') != __tabversion__:
            raise ImportError('Inconsistent PLY version')

        self.lextokens      = lextab._lextokens
        self.lexreflags     = lextab._lexreflags
        self.lexliterals    = lextab._lexliterals
        self.lextokens_all  = self.lextokens | set(self.lexliterals)
        self.lexstateinfo   = lextab._lexstateinfo
        self.lexstateignore = lextab._lexstateignore
        self.lexstatere     = {}
        self.lexstateretext = {}
        for statename, lre in lextab._lexstatere.items():
            titem = []
            txtitem = []
            for pat, func_name in lre:
                titem.append((re.compile(pat, lextab._lexreflags | re.VERBOSE), _names_to_funcs(func_name, fdict)))
                txtitem.append(pat)

            self.lexstatere[statename] = titem
            self.lexstateretext[statename] = txtitem

        self.lexstateerrorf = {}
        for statename, ef in lextab._lexstateerrorf.items():
            self.lexstateerrorf[statename] = fdict[ef]

        self.lexstateeoff = {}
        for statename, ef in lextab._lexstateeoff.items():
            self.lexstateeoff[statename] = fdict[ef]

        self.begin('INITIAL')

    # ------------------------------------------------------------
    # input() - Push a new string into the lexer
    # ------------------------------------------------------------
    def input(self, s):
        # Pull off the first character to see if s looks like a string
        c = s[:1]
        if not isinstance(c, StringTypes):
            raise ValueError('Expected a string')
        self.lexdata = s
        self.lexpos = 0
        self.lexlen = len(s)

    # ------------------------------------------------------------
    # begin() - Changes the lexing state
    # ------------------------------------------------------------
    def begin(self, state):
        if state not in self.lexstatere:
            raise ValueError('Undefined state')
        self.lexre = self.lexstatere[state]
        self.lexretext = self.lexstateretext[state]
        self.lexignore = self.lexstateignore.get(state, '')
        self.lexerrorf = self.lexstateerrorf.get(state, None)
        self.lexeoff = self.lexstateeoff.get(state, None)
        self.lexstate = state

    # ------------------------------------------------------------
    # push_state() - Changes the lexing state and saves old on stack
    # ------------------------------------------------------------
    def push_state(self, state):
        self.lexstatestack.append(self.lexstate)
        self.begin(state)

    # ------------------------------------------------------------
    # pop_state() - Restores the previous state
    # ------------------------------------------------------------
    def pop_state(self):
        self.begin(self.lexstatestack.pop())
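
    # Example (an illustrative sketch): a t_* rule can switch states to scan a
    # nested construct and restore the previous state afterwards.  The state
    # name 'ccode' and both rules below are hypothetical:
    #
    #     def t_lbrace(t):
    #         r'\{'
    #         t.lexer.push_state('ccode')    # enter the 'ccode' state
    #
    #     def t_ccode_rbrace(t):
    #         r'\}'
    #         t.lexer.pop_state()            # return to the previous state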

    # ------------------------------------------------------------
    # current_state() - Returns the current lexing state
    # ------------------------------------------------------------
    def current_state(self):
        return self.lexstate

    # ------------------------------------------------------------
    # skip() - Skip ahead n characters
    # ------------------------------------------------------------
    def skip(self, n):
        self.lexpos += n
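
    # Example (an illustrative sketch): skip() is typically called from a
    # t_error() rule to discard the offending character and resume scanning:
    #
    #     def t_error(t):
    #         print("Illegal character %r" % t.value[0])
    #         t.lexer.skip(1)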

    # ------------------------------------------------------------
    # token() - Return the next token from the Lexer
    #
    # Note: This function has been carefully implemented to be as fast
    # as possible.  Don't make changes unless you really know what
    # you are doing
    # ------------------------------------------------------------
    def token(self):
        # Make local copies of frequently referenced attributes
        lexpos    = self.lexpos
        lexlen    = self.lexlen
        lexignore = self.lexignore
        lexdata   = self.lexdata

        while lexpos < lexlen:
            # Short-circuit scanning of whitespace, tabs, and other ignored characters
            if lexdata[lexpos] in lexignore:
                lexpos += 1
                continue

            # Look for a regular expression match
            for lexre, lexindexfunc in self.lexre:
                m = lexre.match(lexdata, lexpos)
                if not m:
                    continue

                # Create a token for return
                tok = LexToken()
                tok.value = m.group()
                tok.lineno = self.lineno
                tok.lexpos = lexpos

                i = m.lastindex
                func, tok.type = lexindexfunc[i]

                if not func:
                    # If no token type was set, it's an ignored token
                    if tok.type:
                        self.lexpos = m.end()
                        return tok
                    else:
                        lexpos = m.end()
                        break

                lexpos = m.end()

                # If token is processed by a function, call it

                tok.lexer = self      # Set additional attributes useful in token rules
                self.lexmatch = m
                self.lexpos = lexpos

                newtok = func(tok)

                # Every rule function must return a token; if it returns nothing,
                # we simply move on to the next token
                if not newtok:
                    lexpos    = self.lexpos         # This is here in case user has updated lexpos.
                    lexignore = self.lexignore      # This is here in case there was a state change
                    break

                # Verify type of the token.  If not in the token map, raise an error
                if not self.lexoptimize:
                    if newtok.type not in self.lextokens_all:
                        raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
                            func.__code__.co_filename, func.__code__.co_firstlineno,
                            func.__name__, newtok.type), lexdata[lexpos:])

                return newtok
            else:
                # No match, see if in literals
                if lexdata[lexpos] in self.lexliterals:
                    tok = LexToken()
                    tok.value = lexdata[lexpos]
                    tok.lineno = self.lineno
                    tok.type = tok.value
                    tok.lexpos = lexpos
                    self.lexpos = lexpos + 1
                    return tok

                # No match. Call t_error() if defined.
                if self.lexerrorf:
                    tok = LexToken()
                    tok.value = self.lexdata[lexpos:]
                    tok.lineno = self.lineno
                    tok.type = 'error'
                    tok.lexer = self
                    tok.lexpos = lexpos
                    self.lexpos = lexpos
                    newtok = self.lexerrorf(tok)
                    if lexpos == self.lexpos:
                        # Error method didn't change text position at all. This is an error.
                        raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
                    lexpos = self.lexpos
                    if not newtok:
                        continue
                    return newtok

                self.lexpos = lexpos
                raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos], lexpos), lexdata[lexpos:])

        if self.lexeoff:
            tok = LexToken()
            tok.type = 'eof'
            tok.value = ''
            tok.lineno = self.lineno
            tok.lexpos = lexpos
            tok.lexer = self
            self.lexpos = lexpos
            newtok = self.lexeoff(tok)
            return newtok

        self.lexpos = lexpos + 1
        if self.lexdata is None:
            raise RuntimeError('No input string given with input()')
        return None

    # Iterator interface
    def __iter__(self):
        return self

    def next(self):
        t = self.token()
        if t is None:
            raise StopIteration
        return t

    __next__ = next
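
# Example (an illustrative sketch): because of the iterator interface above,
# a lexer can be consumed directly in a for-loop once input() has been called:
#
#     lexer.input(data)          # data is a hypothetical input string
#     for tok in lexer:
#         print(tok)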

# -----------------------------------------------------------------------------
#                           === Lex Builder ===
#
# The functions and classes below are used to collect lexing information
# and build a Lexer object from it.
# -----------------------------------------------------------------------------

# -----------------------------------------------------------------------------
# _get_regex(func)
#
# Returns the regular expression assigned to a function either as a doc string
# or as a .regex attribute attached by the @TOKEN decorator.
# -----------------------------------------------------------------------------
def _get_regex(func):
    return getattr(func, 'regex', func.__doc__)

# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack.  This is used to get the environment
# associated with the lex() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
    f = sys._getframe(levels)
    ldict = f.f_globals.copy()
    if f.f_globals != f.f_locals:
        ldict.update(f.f_locals)
    return ldict

# -----------------------------------------------------------------------------
# _funcs_to_names()
#
# Given a list of regular expression functions, this converts it to a list
# suitable for output to a table file
# -----------------------------------------------------------------------------
def _funcs_to_names(funclist, namelist):
    result = []
    for f, name in zip(funclist, namelist):
        if f and f[0]:
            result.append((name, f[1]))
        else:
            result.append(f)
    return result

# -----------------------------------------------------------------------------
# _names_to_funcs()
#
# Given a list of regular expression function names, this converts it back to
# functions.
# -----------------------------------------------------------------------------
def _names_to_funcs(namelist, fdict):
    result = []
    for n in namelist:
        if n and n[0]:
            result.append((fdict[n[0]], n[1]))
        else:
            result.append(n)
    return result

# -----------------------------------------------------------------------------
# _form_master_re()
#
# This function takes a list of all of the regex components and attempts to
# form the master regular expression.  Because the Python re module limits
# the number of named groups in a single pattern (100 in older versions), it
# may be necessary to break the master regex into separate expressions.
# -----------------------------------------------------------------------------
def _form_master_re(relist, reflags, ldict, toknames):
    if not relist:
        return []
    regex = '|'.join(relist)
    try:
        lexre = re.compile(regex, re.VERBOSE | reflags)

        # Build the index to function map for the matching engine
        lexindexfunc = [None] * (max(lexre.groupindex.values()) + 1)
        lexindexnames = lexindexfunc[:]

        for f, i in lexre.groupindex.items():
            handle = ldict.get(f, None)
            if type(handle) in (types.FunctionType, types.MethodType):
                lexindexfunc[i] = (handle, toknames[f])
                lexindexnames[i] = f
            elif handle is not None:
                lexindexnames[i] = f
                if f.find('ignore_') > 0:
                    lexindexfunc[i] = (None, None)
                else:
                    lexindexfunc[i] = (None, toknames[f])

        return [(lexre, lexindexfunc)], [regex], [lexindexnames]
    except Exception:
        m = len(relist) // 2
        if m == 0:
            m = 1
        llist, lre, lnames = _form_master_re(relist[:m], reflags, ldict, toknames)
        rlist, rre, rnames = _form_master_re(relist[m:], reflags, ldict, toknames)
        return (llist+rlist), (lre+rre), (lnames+rnames)

# -----------------------------------------------------------------------------
# _statetoken(s, names)
#
# Given a declaration name s of the form "t_" and a dictionary whose keys are
# state names, this function returns a tuple (states, tokenname) where states
# is a tuple of state names and tokenname is the name of the token.  For example,
# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
# -----------------------------------------------------------------------------
def _statetoken(s, names):
    parts = s.split('_')
    for i, part in enumerate(parts[1:], 1):
        if part not in names and part != 'ANY':
            break

    if i > 1:
        states = tuple(parts[1:i])
    else:
        states = ('INITIAL',)

    if 'ANY' in states:
        states = tuple(names)

    tokenname = '_'.join(parts[i:])
    return (states, tokenname)


# -----------------------------------------------------------------------------
# LexerReflect()
#
# This class represents information needed to build a lexer as extracted from a
# user's input file.
# -----------------------------------------------------------------------------
class LexerReflect(object):
    def __init__(self, ldict, log=None, reflags=0):
        self.ldict      = ldict
        self.error_func = None
        self.tokens     = []
        self.reflags    = reflags
        self.stateinfo  = {'INITIAL': 'inclusive'}
        self.modules    = set()
        self.error      = False
        self.log        = PlyLogger(sys.stderr) if log is None else log

    # Get all of the basic information
    def get_all(self):
        self.get_tokens()
        self.get_literals()
        self.get_states()
        self.get_rules()

    # Validate all of the information
    def validate_all(self):
        self.validate_tokens()
        self.validate_literals()
        self.validate_rules()
        return self.error

    # Get the tokens map
    def get_tokens(self):
        tokens = self.ldict.get('tokens', None)
        if not tokens:
            self.log.error('No token list is defined')
            self.error = True
            return

        if not isinstance(tokens, (list, tuple)):
            self.log.error('tokens must be a list or tuple')
            self.error = True
            return

        if not tokens:
            self.log.error('tokens is empty')
            self.error = True
            return

        self.tokens = tokens

    # Validate the tokens
    def validate_tokens(self):
        terminals = {}
        for n in self.tokens:
            if not _is_identifier.match(n):
                self.log.error("Bad token name '%s'", n)
                self.error = True
            if n in terminals:
                self.log.warning("Token '%s' multiply defined", n)
            terminals[n] = 1

    # Get the literals specifier
    def get_literals(self):
        self.literals = self.ldict.get('literals', '')
        if not self.literals:
            self.literals = ''

    # Validate literals
    def validate_literals(self):
        try:
            for c in self.literals:
                if not isinstance(c, StringTypes) or len(c) > 1:
                    self.log.error('Invalid literal %s. Must be a single character', repr(c))
                    self.error = True

        except TypeError:
            self.log.error('Invalid literals specification. literals must be a sequence of characters')
            self.error = True

    def get_states(self):
        self.states = self.ldict.get('states', None)
        # Build statemap
        if self.states:
            if not isinstance(self.states, (tuple, list)):
                self.log.error('states must be defined as a tuple or list')
                self.error = True
            else:
                for s in self.states:
                    if not isinstance(s, tuple) or len(s) != 2:
                        self.log.error("Invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')", repr(s))
                        self.error = True
                        continue
                    name, statetype = s
                    if not isinstance(name, StringTypes):
                        self.log.error('State name %s must be a string', repr(name))
                        self.error = True
                        continue
                    if not (statetype == 'inclusive' or statetype == 'exclusive'):
                        self.log.error("State type for state %s must be 'inclusive' or 'exclusive'", name)
                        self.error = True
                        continue
                    if name in self.stateinfo:
                        self.log.error("State '%s' already defined", name)
                        self.error = True
                        continue
                    self.stateinfo[name] = statetype

    # Get all of the symbols with a t_ prefix and sort them into various
    # categories (functions, strings, error functions, and ignore characters)

    def get_rules(self):
        tsymbols = [f for f in self.ldict if f[:2] == 't_']

        # Now build up a list of functions and a list of strings
        self.toknames = {}        # Mapping of symbols to token names
        self.funcsym  = {}        # Symbols defined as functions
        self.strsym   = {}        # Symbols defined as strings
        self.ignore   = {}        # Ignore strings by state
        self.errorf   = {}        # Error functions by state
        self.eoff     = {}        # EOF functions by state

        for s in self.stateinfo:
            self.funcsym[s] = []
            self.strsym[s] = []

        if len(tsymbols) == 0:
            self.log.error('No rules of the form t_rulename are defined')
            self.error = True
            return

        for f in tsymbols:
            t = self.ldict[f]
            states, tokname = _statetoken(f, self.stateinfo)
            self.toknames[f] = tokname

            if hasattr(t, '__call__'):
                if tokname == 'error':
                    for s in states:
                        self.errorf[s] = t
                elif tokname == 'eof':
                    for s in states:
                        self.eoff[s] = t
                elif tokname == 'ignore':
                    line = t.__code__.co_firstlineno
                    file = t.__code__.co_filename
                    self.log.error("%s:%d: Rule '%s' must be defined as a string", file, line, t.__name__)
                    self.error = True
                else:
                    for s in states:
                        self.funcsym[s].append((f, t))
            elif isinstance(t, StringTypes):
                if tokname == 'ignore':
                    for s in states:
                        self.ignore[s] = t
                    if '\\' in t:
                        self.log.warning("%s contains a literal backslash '\\'", f)

                elif tokname == 'error':
                    self.log.error("Rule '%s' must be defined as a function", f)
                    self.error = True
                else:
                    for s in states:
                        self.strsym[s].append((f, t))
            else:
                self.log.error('%s not defined as a function or string', f)
                self.error = True

        # Sort the functions by line number
        for f in self.funcsym.values():
            f.sort(key=lambda x: x[1].__code__.co_firstlineno)

        # Sort the strings by regular expression length
        for s in self.strsym.values():
            s.sort(key=lambda x: len(x[1]), reverse=True)

    # Validate all of the t_rules collected
    def validate_rules(self):
        for state in self.stateinfo:
            # Validate all rules defined by functions

            for fname, f in self.funcsym[state]:
                line = f.__code__.co_firstlineno
                file = f.__code__.co_filename
                module = inspect.getmodule(f)
                self.modules.add(module)

                tokname = self.toknames[fname]
                if isinstance(f, types.MethodType):
                    reqargs = 2
                else:
                    reqargs = 1
                nargs = f.__code__.co_argcount
                if nargs > reqargs:
                    self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__)
                    self.error = True
                    continue

                if nargs < reqargs:
                    self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__)
                    self.error = True
                    continue

                if not _get_regex(f):
                    self.log.error("%s:%d: No regular expression defined for rule '%s'", file, line, f.__name__)
                    self.error = True
                    continue

                try:
                    c = re.compile('(?P<%s>%s)' % (fname, _get_regex(f)), re.VERBOSE | self.reflags)
                    if c.match(''):
                        self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file, line, f.__name__)
                        self.error = True
                except re.error as e:
                    self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file, line, f.__name__, e)
                    if '#' in _get_regex(f):
                        self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'", file, line, f.__name__)
                    self.error = True

            # Validate all rules defined by strings
            for name, r in self.strsym[state]:
                tokname = self.toknames[name]
                if tokname == 'error':
                    self.log.error("Rule '%s' must be defined as a function", name)
                    self.error = True
                    continue

                if tokname not in self.tokens and tokname.find('ignore_') < 0:
                    self.log.error("Rule '%s' defined for an unspecified token %s", name, tokname)
                    self.error = True
                    continue

                try:
                    c = re.compile('(?P<%s>%s)' % (name, r), re.VERBOSE | self.reflags)
                    if c.match(''):
                        self.log.error("Regular expression for rule '%s' matches empty string", name)
                        self.error = True
                except re.error as e:
                    self.log.error("Invalid regular expression for rule '%s'. %s", name, e)
                    if '#' in r:
                        self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'", name)
                    self.error = True

            if not self.funcsym[state] and not self.strsym[state]:
                self.log.error("No rules defined for state '%s'", state)
                self.error = True

            # Validate the error function
            efunc = self.errorf.get(state, None)
            if efunc:
                f = efunc
                line = f.__code__.co_firstlineno
                file = f.__code__.co_filename
                module = inspect.getmodule(f)
                self.modules.add(module)

                if isinstance(f, types.MethodType):
                    reqargs = 2
                else:
                    reqargs = 1
                nargs = f.__code__.co_argcount
                if nargs > reqargs:
                    self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__)
                    self.error = True

                if nargs < reqargs:
                    self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__)
                    self.error = True

        for module in self.modules:
            self.validate_module(module)

    # -----------------------------------------------------------------------------
    # validate_module()
    #
    # This checks to see if there are duplicated t_rulename() functions or strings
    # in the lexer input file.  This is done using a simple regular expression
    # match on each line in the source code of the given module.
    # -----------------------------------------------------------------------------

    def validate_module(self, module):
        lines, linen = inspect.getsourcelines(module)

        fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
        sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')

        counthash = {}
        linen += 1
        for line in lines:
            m = fre.match(line)
            if not m:
                m = sre.match(line)
            if m:
                name = m.group(1)
                prev = counthash.get(name)
                if not prev:
                    counthash[name] = linen
                else:
                    filename = inspect.getsourcefile(module)
                    self.log.error('%s:%d: Rule %s redefined. Previously defined on line %d', filename, linen, name, prev)
                    self.error = True
            linen += 1

# -----------------------------------------------------------------------------
# lex(module)
#
# Build all of the regular expression rules from definitions in the supplied module
# -----------------------------------------------------------------------------
def lex(module=None, object=None, debug=False, optimize=False, lextab='lextab',
        reflags=0, nowarn=False, outputdir=None, debuglog=None, errorlog=None):

    if lextab is None:
        lextab = 'lextab'

    global lexer

    ldict = None
    stateinfo  = {'INITIAL': 'inclusive'}
    lexobj = Lexer()
    lexobj.lexoptimize = optimize
    global token, input

    if errorlog is None:
        errorlog = PlyLogger(sys.stderr)

    if debug:
        if debuglog is None:
            debuglog = PlyLogger(sys.stderr)

    # If an object instance was supplied, use it in place of the module
    if object:
        module = object

    # Get the module dictionary used for the lexer
    if module:
        _items = [(k, getattr(module, k)) for k in dir(module)]
        ldict = dict(_items)
        # If no __file__ attribute is available, try to obtain it from the __module__ instead
        if '__file__' not in ldict:
            ldict['__file__'] = sys.modules[ldict['__module__']].__file__
    else:
        ldict = get_caller_module_dict(2)

    # Determine if the module is part of a package.
    # If so, fix the lextab setting so that tables load correctly
    pkg = ldict.get('__package__')
    if pkg and isinstance(lextab, str):
        if '.' not in lextab:
            lextab = pkg + '.' + lextab

    # Collect lexer information from the dictionary
    linfo = LexerReflect(ldict, log=errorlog, reflags=reflags)
    linfo.get_all()
    if not optimize:
        if linfo.validate_all():
            raise SyntaxError("Can't build lexer")

    if optimize and lextab:
        try:
            lexobj.readtab(lextab, ldict)
            token = lexobj.token
            input = lexobj.input
            lexer = lexobj
            return lexobj

        except ImportError:
            pass

    # Dump some basic debugging information
    if debug:
        debuglog.info('lex: tokens   = %r', linfo.tokens)
        debuglog.info('lex: literals = %r', linfo.literals)
        debuglog.info('lex: states   = %r', linfo.stateinfo)

    # Build a dictionary of valid token names
    lexobj.lextokens = set()
    for n in linfo.tokens:
        lexobj.lextokens.add(n)

    # Get literals specification
    if isinstance(linfo.literals, (list, tuple)):
        lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals)
    else:
        lexobj.lexliterals = linfo.literals

    lexobj.lextokens_all = lexobj.lextokens | set(lexobj.lexliterals)

    # Get the stateinfo dictionary
    stateinfo = linfo.stateinfo

    regexs = {}
    # Build the master regular expressions
    for state in stateinfo:
        regex_list = []

        # Add rules defined by functions first
        for fname, f in linfo.funcsym[state]:
            line = f.__code__.co_firstlineno
            file = f.__code__.co_filename
            regex_list.append('(?P<%s>%s)' % (fname, _get_regex(f)))
            if debug:
                debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", fname, _get_regex(f), state)

        # Now add all of the simple rules
        for name, r in linfo.strsym[state]:
            regex_list.append('(?P<%s>%s)' % (name, r))
            if debug:
                debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", name, r, state)

        regexs[state] = regex_list

    # Build the master regular expressions

    if debug:
        debuglog.info('lex: ==== MASTER REGEXS FOLLOW ====')

    for state in regexs:
        lexre, re_text, re_names = _form_master_re(regexs[state], reflags, ldict, linfo.toknames)
        lexobj.lexstatere[state] = lexre
        lexobj.lexstateretext[state] = re_text
        lexobj.lexstaterenames[state] = re_names
        if debug:
            for i, text in enumerate(re_text):
                debuglog.info("lex: state '%s' : regex[%d] = '%s'", state, i, text)

    # For inclusive states, we need to add the regular expressions from the INITIAL state
    for state, stype in stateinfo.items():
        if state != 'INITIAL' and stype == 'inclusive':
            lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
            lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
            lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])

    lexobj.lexstateinfo = stateinfo
    lexobj.lexre = lexobj.lexstatere['INITIAL']
    lexobj.lexretext = lexobj.lexstateretext['INITIAL']
    lexobj.lexreflags = reflags

    # Set up ignore variables
    lexobj.lexstateignore = linfo.ignore
    lexobj.lexignore = lexobj.lexstateignore.get('INITIAL', '')

    # Set up error functions
    lexobj.lexstateerrorf = linfo.errorf
    lexobj.lexerrorf = linfo.errorf.get('INITIAL', None)
    if not lexobj.lexerrorf:
        errorlog.warning('No t_error rule is defined')

    # Set up eof functions
    lexobj.lexstateeoff = linfo.eoff
    lexobj.lexeoff = linfo.eoff.get('INITIAL', None)

    # Check state information for ignore and error rules
    for s, stype in stateinfo.items():
        if stype == 'exclusive':
            if s not in linfo.errorf:
                errorlog.warning("No error rule is defined for exclusive state '%s'", s)
            if s not in linfo.ignore and lexobj.lexignore:
                errorlog.warning("No ignore rule is defined for exclusive state '%s'", s)
        elif stype == 'inclusive':
            if s not in linfo.errorf:
                linfo.errorf[s] = linfo.errorf.get('INITIAL', None)
            if s not in linfo.ignore:
                linfo.ignore[s] = linfo.ignore.get('INITIAL', '')

    # Create global versions of the token() and input() functions
    token = lexobj.token
    input = lexobj.input
    lexer = lexobj

    # If in optimize mode, we write the lextab
    if lextab and optimize:
        if outputdir is None:
            # If no output directory is set, the location of the output files
            # is determined according to the following rules:
            #     - If lextab specifies a package, files go into that package directory
            #     - Otherwise, files go in the same directory as the specifying module
            if isinstance(lextab, types.ModuleType):
                srcfile = lextab.__file__
            else:
                if '.' not in lextab:
                    srcfile = ldict['__file__']
                else:
                    parts = lextab.split('.')
                    pkgname = '.'.join(parts[:-1])
                    exec('import %s' % pkgname)
                    srcfile = getattr(sys.modules[pkgname], '__file__', '')
            outputdir = os.path.dirname(srcfile)
        try:
            lexobj.writetab(lextab, outputdir)
        except IOError as e:
            errorlog.warning("Couldn't write lextab module %r. %s" % (lextab, e))

    return lexobj
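
# Example (an illustrative sketch): a minimal token module for lex().  All of
# the names below are hypothetical, but follow the conventions checked above:
#
#     tokens = ('NUMBER', 'PLUS')
#
#     t_PLUS   = r'\+'
#     t_ignore = ' \t'
#
#     def t_NUMBER(t):
#         r'\d+'
#         t.value = int(t.value)
#         return t
#
#     def t_error(t):
#         print("Illegal character %r" % t.value[0])
#         t.lexer.skip(1)
#
#     lexer = lex()    # pass optimize=1 and lextab='...' to use a table file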

# -----------------------------------------------------------------------------
# runmain()
#
# This runs the lexer as a main program
# -----------------------------------------------------------------------------

def runmain(lexer=None, data=None):
    if not data:
        try:
            filename = sys.argv[1]
            with open(filename) as f:
                data = f.read()
        except IndexError:
            sys.stdout.write('Reading from standard input (type EOF to end):\n')
            data = sys.stdin.read()

    if lexer:
        _input = lexer.input
    else:
        _input = input
    _input(data)
    if lexer:
        _token = lexer.token
    else:
        _token = token

    while True:
        tok = _token()
        if not tok:
            break
        sys.stdout.write('(%s,%r,%d,%d)\n' % (tok.type, tok.value, tok.lineno, tok.lexpos))

# -----------------------------------------------------------------------------
# @TOKEN(regex)
#
# This decorator attaches a regular expression to a function, for cases where
# the rule's pattern cannot conveniently be given as a docstring (for example,
# when it is computed or assembled from other patterns).
# -----------------------------------------------------------------------------

def TOKEN(r):
    def set_regex(f):
        if hasattr(r, '__call__'):
            f.regex = _get_regex(r)
        else:
            f.regex = r
        return f
    return set_regex

# Alternative spelling of the TOKEN decorator
Token = TOKEN
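
# Example (an illustrative sketch): @TOKEN lets the rule's regex be built
# dynamically, which a docstring cannot be.  `identifier` is a hypothetical
# pattern:
#
#     identifier = r'[a-zA-Z_][a-zA-Z0-9_]*'
#
#     @TOKEN(identifier)
#     def t_ID(t):
#         return t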