1# ----------------------------------------------------------------------------- 2# ply: lex.py 3# 4# Copyright (C) 2001-2018 5# David M. Beazley (Dabeaz LLC) 6# All rights reserved. 7# 8# Redistribution and use in source and binary forms, with or without 9# modification, are permitted provided that the following conditions are 10# met: 11# 12# * Redistributions of source code must retain the above copyright notice, 13# this list of conditions and the following disclaimer. 14# * Redistributions in binary form must reproduce the above copyright notice, 15# this list of conditions and the following disclaimer in the documentation 16# and/or other materials provided with the distribution. 17# * Neither the name of the David Beazley or Dabeaz LLC may be used to 18# endorse or promote products derived from this software without 19# specific prior written permission. 20# 21# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32# ----------------------------------------------------------------------------- 33 34__version__ = '3.11' 35__tabversion__ = '3.10' 36 37import re 38import sys 39import types 40import copy 41import os 42import inspect 43 44# This tuple contains known string types 45try: 46 # Python 2.6 47 StringTypes = (types.StringType, types.UnicodeType) 48except AttributeError: 49 # Python 3.0 50 StringTypes = (str, bytes) 51 52# This regular expression is used to match valid token names 53_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$') 54 55# Exception thrown when invalid token encountered and no default error 56# handler is defined. 57class LexError(Exception): 58 def __init__(self, message, s): 59 self.args = (message,) 60 self.text = s 61 62 63# Token class. This class is used to represent the tokens produced. 64class LexToken(object): 65 def __str__(self): 66 return 'LexToken(%s,%r,%d,%d)' % (self.type, self.value, self.lineno, self.lexpos) 67 68 def __repr__(self): 69 return str(self) 70 71 72# This object is a stand-in for a logging object created by the 73# logging module. 74 75class PlyLogger(object): 76 def __init__(self, f): 77 self.f = f 78 79 def critical(self, msg, *args, **kwargs): 80 self.f.write((msg % args) + '\n') 81 82 def warning(self, msg, *args, **kwargs): 83 self.f.write('WARNING: ' + (msg % args) + '\n') 84 85 def error(self, msg, *args, **kwargs): 86 self.f.write('ERROR: ' + (msg % args) + '\n') 87 88 info = critical 89 debug = critical 90 91 92# Null logger is used when no output is generated. Does nothing. 93class NullLogger(object): 94 def __getattribute__(self, name): 95 return self 96 97 def __call__(self, *args, **kwargs): 98 return self 99 100 101# ----------------------------------------------------------------------------- 102# === Lexing Engine === 103# 104# The following Lexer class implements the lexer runtime. There are only 105# a few public methods and attributes: 106# 107# input() - Store a new string in the lexer 108# token() - Get the next token 109# clone() - Clone the lexer 110# 111# lineno - Current line number 112# lexpos - Current position in the input string 113# ----------------------------------------------------------------------------- 114 115class Lexer: 116 def __init__(self): 117 self.lexre = None # Master regular expression. This is a list of 118 # tuples (re, findex) where re is a compiled 119 # regular expression and findex is a list 120 # mapping regex group numbers to rules 121 self.lexretext = None # Current regular expression strings 122 self.lexstatere = {} # Dictionary mapping lexer states to master regexs 123 self.lexstateretext = {} # Dictionary mapping lexer states to regex strings 124 self.lexstaterenames = {} # Dictionary mapping lexer states to symbol names 125 self.lexstate = 'INITIAL' # Current lexer state 126 self.lexstatestack = [] # Stack of lexer states 127 self.lexstateinfo = None # State information 128 self.lexstateignore = {} # Dictionary of ignored characters for each state 129 self.lexstateerrorf = {} # Dictionary of error functions for each state 130 self.lexstateeoff = {} # Dictionary of eof functions for each state 131 self.lexreflags = 0 # Optional re compile flags 132 self.lexdata = None # Actual input data (as a string) 133 self.lexpos = 0 # Current position in input text 134 self.lexlen = 0 # Length of the input text 135 self.lexerrorf = None # Error rule (if any) 136 self.lexeoff = None # EOF rule (if any) 137 self.lextokens = None # List of valid tokens 138 self.lexignore = '' # Ignored characters 139 self.lexliterals = '' # Literal characters that can be passed through 140 self.lexmodule = None # Module 141 self.lineno = 1 # Current line number 142 self.lexoptimize = False # Optimized mode 143 144 def clone(self, object=None): 145 c = copy.copy(self) 146 147 # If the object parameter has been supplied, it means we are attaching the 148 # lexer to a new object. In this case, we have to rebind all methods in 149 # the lexstatere and lexstateerrorf tables. 150 151 if object: 152 newtab = {} 153 for key, ritem in self.lexstatere.items(): 154 newre = [] 155 for cre, findex in ritem: 156 newfindex = [] 157 for f in findex: 158 if not f or not f[0]: 159 newfindex.append(f) 160 continue 161 newfindex.append((getattr(object, f[0].__name__), f[1])) 162 newre.append((cre, newfindex)) 163 newtab[key] = newre 164 c.lexstatere = newtab 165 c.lexstateerrorf = {} 166 for key, ef in self.lexstateerrorf.items(): 167 c.lexstateerrorf[key] = getattr(object, ef.__name__) 168 c.lexmodule = object 169 return c 170 171 # ------------------------------------------------------------ 172 # writetab() - Write lexer information to a table file 173 # ------------------------------------------------------------ 174 def writetab(self, lextab, outputdir=''): 175 if isinstance(lextab, types.ModuleType): 176 raise IOError("Won't overwrite existing lextab module") 177 basetabmodule = lextab.split('.')[-1] 178 filename = os.path.join(outputdir, basetabmodule) + '.py' 179 with open(filename, 'w') as tf: 180 tf.write('# %s.py. This file automatically created by PLY (version %s). Don\'t edit!\n' % (basetabmodule, __version__)) 181 tf.write('_tabversion = %s\n' % repr(__tabversion__)) 182 tf.write('_lextokens = set(%s)\n' % repr(tuple(sorted(self.lextokens)))) 183 tf.write('_lexreflags = %s\n' % repr(int(self.lexreflags))) 184 tf.write('_lexliterals = %s\n' % repr(self.lexliterals)) 185 tf.write('_lexstateinfo = %s\n' % repr(self.lexstateinfo)) 186 187 # Rewrite the lexstatere table, replacing function objects with function names 188 tabre = {} 189 for statename, lre in self.lexstatere.items(): 190 titem = [] 191 for (pat, func), retext, renames in zip(lre, self.lexstateretext[statename], self.lexstaterenames[statename]): 192 titem.append((retext, _funcs_to_names(func, renames))) 193 tabre[statename] = titem 194 195 tf.write('_lexstatere = %s\n' % repr(tabre)) 196 tf.write('_lexstateignore = %s\n' % repr(self.lexstateignore)) 197 198 taberr = {} 199 for statename, ef in self.lexstateerrorf.items(): 200 taberr[statename] = ef.__name__ if ef else None 201 tf.write('_lexstateerrorf = %s\n' % repr(taberr)) 202 203 tabeof = {} 204 for statename, ef in self.lexstateeoff.items(): 205 tabeof[statename] = ef.__name__ if ef else None 206 tf.write('_lexstateeoff = %s\n' % repr(tabeof)) 207 208 # ------------------------------------------------------------ 209 # readtab() - Read lexer information from a tab file 210 # ------------------------------------------------------------ 211 def readtab(self, tabfile, fdict): 212 if isinstance(tabfile, types.ModuleType): 213 lextab = tabfile 214 else: 215 exec('import %s' % tabfile) 216 lextab = sys.modules[tabfile] 217 218 if getattr(lextab, '_tabversion', '0.0') != __tabversion__: 219 raise ImportError('Inconsistent PLY version') 220 221 self.lextokens = lextab._lextokens 222 self.lexreflags = lextab._lexreflags 223 self.lexliterals = lextab._lexliterals 224 self.lextokens_all = self.lextokens | set(self.lexliterals) 225 self.lexstateinfo = lextab._lexstateinfo 226 self.lexstateignore = lextab._lexstateignore 227 self.lexstatere = {} 228 self.lexstateretext = {} 229 for statename, lre in lextab._lexstatere.items(): 230 titem = [] 231 txtitem = [] 232 for pat, func_name in lre: 233 titem.append((re.compile(pat, lextab._lexreflags), _names_to_funcs(func_name, fdict))) 234 235 self.lexstatere[statename] = titem 236 self.lexstateretext[statename] = txtitem 237 238 self.lexstateerrorf = {} 239 for statename, ef in lextab._lexstateerrorf.items(): 240 self.lexstateerrorf[statename] = fdict[ef] 241 242 self.lexstateeoff = {} 243 for statename, ef in lextab._lexstateeoff.items(): 244 self.lexstateeoff[statename] = fdict[ef] 245 246 self.begin('INITIAL') 247 248 # ------------------------------------------------------------ 249 # input() - Push a new string into the lexer 250 # ------------------------------------------------------------ 251 def input(self, s): 252 # Pull off the first character to see if s looks like a string 253 c = s[:1] 254 if not isinstance(c, StringTypes): 255 raise ValueError('Expected a string') 256 self.lexdata = s 257 self.lexpos = 0 258 self.lexlen = len(s) 259 260 # ------------------------------------------------------------ 261 # begin() - Changes the lexing state 262 # ------------------------------------------------------------ 263 def begin(self, state): 264 if state not in self.lexstatere: 265 raise ValueError('Undefined state') 266 self.lexre = self.lexstatere[state] 267 self.lexretext = self.lexstateretext[state] 268 self.lexignore = self.lexstateignore.get(state, '') 269 self.lexerrorf = self.lexstateerrorf.get(state, None) 270 self.lexeoff = self.lexstateeoff.get(state, None) 271 self.lexstate = state 272 273 # ------------------------------------------------------------ 274 # push_state() - Changes the lexing state and saves old on stack 275 # ------------------------------------------------------------ 276 def push_state(self, state): 277 self.lexstatestack.append(self.lexstate) 278 self.begin(state) 279 280 # ------------------------------------------------------------ 281 # pop_state() - Restores the previous state 282 # ------------------------------------------------------------ 283 def pop_state(self): 284 self.begin(self.lexstatestack.pop()) 285 286 # ------------------------------------------------------------ 287 # current_state() - Returns the current lexing state 288 # ------------------------------------------------------------ 289 def current_state(self): 290 return self.lexstate 291 292 # ------------------------------------------------------------ 293 # skip() - Skip ahead n characters 294 # ------------------------------------------------------------ 295 def skip(self, n): 296 self.lexpos += n 297 298 # ------------------------------------------------------------ 299 # opttoken() - Return the next token from the Lexer 300 # 301 # Note: This function has been carefully implemented to be as fast 302 # as possible. Don't make changes unless you really know what 303 # you are doing 304 # ------------------------------------------------------------ 305 def token(self): 306 # Make local copies of frequently referenced attributes 307 lexpos = self.lexpos 308 lexlen = self.lexlen 309 lexignore = self.lexignore 310 lexdata = self.lexdata 311 312 while lexpos < lexlen: 313 # This code provides some short-circuit code for whitespace, tabs, and other ignored characters 314 if lexdata[lexpos] in lexignore: 315 lexpos += 1 316 continue 317 318 # Look for a regular expression match 319 for lexre, lexindexfunc in self.lexre: 320 m = lexre.match(lexdata, lexpos) 321 if not m: 322 continue 323 324 # Create a token for return 325 tok = LexToken() 326 tok.value = m.group() 327 tok.lineno = self.lineno 328 tok.lexpos = lexpos 329 330 i = m.lastindex 331 func, tok.type = lexindexfunc[i] 332 333 if not func: 334 # If no token type was set, it's an ignored token 335 if tok.type: 336 self.lexpos = m.end() 337 return tok 338 else: 339 lexpos = m.end() 340 break 341 342 lexpos = m.end() 343 344 # If token is processed by a function, call it 345 346 tok.lexer = self # Set additional attributes useful in token rules 347 self.lexmatch = m 348 self.lexpos = lexpos 349 350 newtok = func(tok) 351 352 # Every function must return a token, if nothing, we just move to next token 353 if not newtok: 354 lexpos = self.lexpos # This is here in case user has updated lexpos. 355 lexignore = self.lexignore # This is here in case there was a state change 356 break 357 358 # Verify type of the token. If not in the token map, raise an error 359 if not self.lexoptimize: 360 if newtok.type not in self.lextokens_all: 361 raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % ( 362 func.__code__.co_filename, func.__code__.co_firstlineno, 363 func.__name__, newtok.type), lexdata[lexpos:]) 364 365 return newtok 366 else: 367 # No match, see if in literals 368 if lexdata[lexpos] in self.lexliterals: 369 tok = LexToken() 370 tok.value = lexdata[lexpos] 371 tok.lineno = self.lineno 372 tok.type = tok.value 373 tok.lexpos = lexpos 374 self.lexpos = lexpos + 1 375 return tok 376 377 # No match. Call t_error() if defined. 378 if self.lexerrorf: 379 tok = LexToken() 380 tok.value = self.lexdata[lexpos:] 381 tok.lineno = self.lineno 382 tok.type = 'error' 383 tok.lexer = self 384 tok.lexpos = lexpos 385 self.lexpos = lexpos 386 newtok = self.lexerrorf(tok) 387 if lexpos == self.lexpos: 388 # Error method didn't change text position at all. This is an error. 389 raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:]) 390 lexpos = self.lexpos 391 if not newtok: 392 continue 393 return newtok 394 395 self.lexpos = lexpos 396 raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos], lexpos), lexdata[lexpos:]) 397 398 if self.lexeoff: 399 tok = LexToken() 400 tok.type = 'eof' 401 tok.value = '' 402 tok.lineno = self.lineno 403 tok.lexpos = lexpos 404 tok.lexer = self 405 self.lexpos = lexpos 406 newtok = self.lexeoff(tok) 407 return newtok 408 409 self.lexpos = lexpos + 1 410 if self.lexdata is None: 411 raise RuntimeError('No input string given with input()') 412 return None 413 414 # Iterator interface 415 def __iter__(self): 416 return self 417 418 def next(self): 419 t = self.token() 420 if t is None: 421 raise StopIteration 422 return t 423 424 __next__ = next 425 426# ----------------------------------------------------------------------------- 427# ==== Lex Builder === 428# 429# The functions and classes below are used to collect lexing information 430# and build a Lexer object from it. 431# ----------------------------------------------------------------------------- 432 433# ----------------------------------------------------------------------------- 434# _get_regex(func) 435# 436# Returns the regular expression assigned to a function either as a doc string 437# or as a .regex attribute attached by the @TOKEN decorator. 438# ----------------------------------------------------------------------------- 439def _get_regex(func): 440 return getattr(func, 'regex', func.__doc__) 441 442# ----------------------------------------------------------------------------- 443# get_caller_module_dict() 444# 445# This function returns a dictionary containing all of the symbols defined within 446# a caller further down the call stack. This is used to get the environment 447# associated with the yacc() call if none was provided. 448# ----------------------------------------------------------------------------- 449def get_caller_module_dict(levels): 450 f = sys._getframe(levels) 451 ldict = f.f_globals.copy() 452 if f.f_globals != f.f_locals: 453 ldict.update(f.f_locals) 454 return ldict 455 456# ----------------------------------------------------------------------------- 457# _funcs_to_names() 458# 459# Given a list of regular expression functions, this converts it to a list 460# suitable for output to a table file 461# ----------------------------------------------------------------------------- 462def _funcs_to_names(funclist, namelist): 463 result = [] 464 for f, name in zip(funclist, namelist): 465 if f and f[0]: 466 result.append((name, f[1])) 467 else: 468 result.append(f) 469 return result 470 471# ----------------------------------------------------------------------------- 472# _names_to_funcs() 473# 474# Given a list of regular expression function names, this converts it back to 475# functions. 476# ----------------------------------------------------------------------------- 477def _names_to_funcs(namelist, fdict): 478 result = [] 479 for n in namelist: 480 if n and n[0]: 481 result.append((fdict[n[0]], n[1])) 482 else: 483 result.append(n) 484 return result 485 486# ----------------------------------------------------------------------------- 487# _form_master_re() 488# 489# This function takes a list of all of the regex components and attempts to 490# form the master regular expression. Given limitations in the Python re 491# module, it may be necessary to break the master regex into separate expressions. 492# ----------------------------------------------------------------------------- 493def _form_master_re(relist, reflags, ldict, toknames): 494 if not relist: 495 return [] 496 regex = '|'.join(relist) 497 try: 498 lexre = re.compile(regex, reflags) 499 500 # Build the index to function map for the matching engine 501 lexindexfunc = [None] * (max(lexre.groupindex.values()) + 1) 502 lexindexnames = lexindexfunc[:] 503 504 for f, i in lexre.groupindex.items(): 505 handle = ldict.get(f, None) 506 if type(handle) in (types.FunctionType, types.MethodType): 507 lexindexfunc[i] = (handle, toknames[f]) 508 lexindexnames[i] = f 509 elif handle is not None: 510 lexindexnames[i] = f 511 if f.find('ignore_') > 0: 512 lexindexfunc[i] = (None, None) 513 else: 514 lexindexfunc[i] = (None, toknames[f]) 515 516 return [(lexre, lexindexfunc)], [regex], [lexindexnames] 517 except Exception: 518 m = int(len(relist)/2) 519 if m == 0: 520 m = 1 521 llist, lre, lnames = _form_master_re(relist[:m], reflags, ldict, toknames) 522 rlist, rre, rnames = _form_master_re(relist[m:], reflags, ldict, toknames) 523 return (llist+rlist), (lre+rre), (lnames+rnames) 524 525# ----------------------------------------------------------------------------- 526# def _statetoken(s,names) 527# 528# Given a declaration name s of the form "t_" and a dictionary whose keys are 529# state names, this function returns a tuple (states,tokenname) where states 530# is a tuple of state names and tokenname is the name of the token. For example, 531# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM') 532# ----------------------------------------------------------------------------- 533def _statetoken(s, names): 534 parts = s.split('_') 535 for i, part in enumerate(parts[1:], 1): 536 if part not in names and part != 'ANY': 537 break 538 539 if i > 1: 540 states = tuple(parts[1:i]) 541 else: 542 states = ('INITIAL',) 543 544 if 'ANY' in states: 545 states = tuple(names) 546 547 tokenname = '_'.join(parts[i:]) 548 return (states, tokenname) 549 550 551# ----------------------------------------------------------------------------- 552# LexerReflect() 553# 554# This class represents information needed to build a lexer as extracted from a 555# user's input file. 556# ----------------------------------------------------------------------------- 557class LexerReflect(object): 558 def __init__(self, ldict, log=None, reflags=0): 559 self.ldict = ldict 560 self.error_func = None 561 self.tokens = [] 562 self.reflags = reflags 563 self.stateinfo = {'INITIAL': 'inclusive'} 564 self.modules = set() 565 self.error = False 566 self.log = PlyLogger(sys.stderr) if log is None else log 567 568 # Get all of the basic information 569 def get_all(self): 570 self.get_tokens() 571 self.get_literals() 572 self.get_states() 573 self.get_rules() 574 575 # Validate all of the information 576 def validate_all(self): 577 self.validate_tokens() 578 self.validate_literals() 579 self.validate_rules() 580 return self.error 581 582 # Get the tokens map 583 def get_tokens(self): 584 tokens = self.ldict.get('tokens', None) 585 if not tokens: 586 self.log.error('No token list is defined') 587 self.error = True 588 return 589 590 if not isinstance(tokens, (list, tuple)): 591 self.log.error('tokens must be a list or tuple') 592 self.error = True 593 return 594 595 if not tokens: 596 self.log.error('tokens is empty') 597 self.error = True 598 return 599 600 self.tokens = tokens 601 602 # Validate the tokens 603 def validate_tokens(self): 604 terminals = {} 605 for n in self.tokens: 606 if not _is_identifier.match(n): 607 self.log.error("Bad token name '%s'", n) 608 self.error = True 609 if n in terminals: 610 self.log.warning("Token '%s' multiply defined", n) 611 terminals[n] = 1 612 613 # Get the literals specifier 614 def get_literals(self): 615 self.literals = self.ldict.get('literals', '') 616 if not self.literals: 617 self.literals = '' 618 619 # Validate literals 620 def validate_literals(self): 621 try: 622 for c in self.literals: 623 if not isinstance(c, StringTypes) or len(c) > 1: 624 self.log.error('Invalid literal %s. Must be a single character', repr(c)) 625 self.error = True 626 627 except TypeError: 628 self.log.error('Invalid literals specification. literals must be a sequence of characters') 629 self.error = True 630 631 def get_states(self): 632 self.states = self.ldict.get('states', None) 633 # Build statemap 634 if self.states: 635 if not isinstance(self.states, (tuple, list)): 636 self.log.error('states must be defined as a tuple or list') 637 self.error = True 638 else: 639 for s in self.states: 640 if not isinstance(s, tuple) or len(s) != 2: 641 self.log.error("Invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')", repr(s)) 642 self.error = True 643 continue 644 name, statetype = s 645 if not isinstance(name, StringTypes): 646 self.log.error('State name %s must be a string', repr(name)) 647 self.error = True 648 continue 649 if not (statetype == 'inclusive' or statetype == 'exclusive'): 650 self.log.error("State type for state %s must be 'inclusive' or 'exclusive'", name) 651 self.error = True 652 continue 653 if name in self.stateinfo: 654 self.log.error("State '%s' already defined", name) 655 self.error = True 656 continue 657 self.stateinfo[name] = statetype 658 659 # Get all of the symbols with a t_ prefix and sort them into various 660 # categories (functions, strings, error functions, and ignore characters) 661 662 def get_rules(self): 663 tsymbols = [f for f in self.ldict if f[:2] == 't_'] 664 665 # Now build up a list of functions and a list of strings 666 self.toknames = {} # Mapping of symbols to token names 667 self.funcsym = {} # Symbols defined as functions 668 self.strsym = {} # Symbols defined as strings 669 self.ignore = {} # Ignore strings by state 670 self.errorf = {} # Error functions by state 671 self.eoff = {} # EOF functions by state 672 673 for s in self.stateinfo: 674 self.funcsym[s] = [] 675 self.strsym[s] = [] 676 677 if len(tsymbols) == 0: 678 self.log.error('No rules of the form t_rulename are defined') 679 self.error = True 680 return 681 682 for f in tsymbols: 683 t = self.ldict[f] 684 states, tokname = _statetoken(f, self.stateinfo) 685 self.toknames[f] = tokname 686 687 if hasattr(t, '__call__'): 688 if tokname == 'error': 689 for s in states: 690 self.errorf[s] = t 691 elif tokname == 'eof': 692 for s in states: 693 self.eoff[s] = t 694 elif tokname == 'ignore': 695 line = t.__code__.co_firstlineno 696 file = t.__code__.co_filename 697 self.log.error("%s:%d: Rule '%s' must be defined as a string", file, line, t.__name__) 698 self.error = True 699 else: 700 for s in states: 701 self.funcsym[s].append((f, t)) 702 elif isinstance(t, StringTypes): 703 if tokname == 'ignore': 704 for s in states: 705 self.ignore[s] = t 706 if '\\' in t: 707 self.log.warning("%s contains a literal backslash '\\'", f) 708 709 elif tokname == 'error': 710 self.log.error("Rule '%s' must be defined as a function", f) 711 self.error = True 712 else: 713 for s in states: 714 self.strsym[s].append((f, t)) 715 else: 716 self.log.error('%s not defined as a function or string', f) 717 self.error = True 718 719 # Sort the functions by line number 720 for f in self.funcsym.values(): 721 f.sort(key=lambda x: x[1].__code__.co_firstlineno) 722 723 # Sort the strings by regular expression length 724 for s in self.strsym.values(): 725 s.sort(key=lambda x: len(x[1]), reverse=True) 726 727 # Validate all of the t_rules collected 728 def validate_rules(self): 729 for state in self.stateinfo: 730 # Validate all rules defined by functions 731 732 for fname, f in self.funcsym[state]: 733 line = f.__code__.co_firstlineno 734 file = f.__code__.co_filename 735 module = inspect.getmodule(f) 736 self.modules.add(module) 737 738 tokname = self.toknames[fname] 739 if isinstance(f, types.MethodType): 740 reqargs = 2 741 else: 742 reqargs = 1 743 nargs = f.__code__.co_argcount 744 if nargs > reqargs: 745 self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__) 746 self.error = True 747 continue 748 749 if nargs < reqargs: 750 self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__) 751 self.error = True 752 continue 753 754 if not _get_regex(f): 755 self.log.error("%s:%d: No regular expression defined for rule '%s'", file, line, f.__name__) 756 self.error = True 757 continue 758 759 try: 760 c = re.compile('(?P<%s>%s)' % (fname, _get_regex(f)), self.reflags) 761 if c.match(''): 762 self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file, line, f.__name__) 763 self.error = True 764 except re.error as e: 765 self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file, line, f.__name__, e) 766 if '#' in _get_regex(f): 767 self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'", file, line, f.__name__) 768 self.error = True 769 770 # Validate all rules defined by strings 771 for name, r in self.strsym[state]: 772 tokname = self.toknames[name] 773 if tokname == 'error': 774 self.log.error("Rule '%s' must be defined as a function", name) 775 self.error = True 776 continue 777 778 if tokname not in self.tokens and tokname.find('ignore_') < 0: 779 self.log.error("Rule '%s' defined for an unspecified token %s", name, tokname) 780 self.error = True 781 continue 782 783 try: 784 c = re.compile('(?P<%s>%s)' % (name, r), self.reflags) 785 if (c.match('')): 786 self.log.error("Regular expression for rule '%s' matches empty string", name) 787 self.error = True 788 except re.error as e: 789 self.log.error("Invalid regular expression for rule '%s'. %s", name, e) 790 if '#' in r: 791 self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'", name) 792 self.error = True 793 794 if not self.funcsym[state] and not self.strsym[state]: 795 self.log.error("No rules defined for state '%s'", state) 796 self.error = True 797 798 # Validate the error function 799 efunc = self.errorf.get(state, None) 800 if efunc: 801 f = efunc 802 line = f.__code__.co_firstlineno 803 file = f.__code__.co_filename 804 module = inspect.getmodule(f) 805 self.modules.add(module) 806 807 if isinstance(f, types.MethodType): 808 reqargs = 2 809 else: 810 reqargs = 1 811 nargs = f.__code__.co_argcount 812 if nargs > reqargs: 813 self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__) 814 self.error = True 815 816 if nargs < reqargs: 817 self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__) 818 self.error = True 819 820 for module in self.modules: 821 self.validate_module(module) 822 823 # ----------------------------------------------------------------------------- 824 # validate_module() 825 # 826 # This checks to see if there are duplicated t_rulename() functions or strings 827 # in the parser input file. This is done using a simple regular expression 828 # match on each line in the source code of the given module. 829 # ----------------------------------------------------------------------------- 830 831 def validate_module(self, module): 832 try: 833 lines, linen = inspect.getsourcelines(module) 834 except IOError: 835 return 836 837 fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(') 838 sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=') 839 840 counthash = {} 841 linen += 1 842 for line in lines: 843 m = fre.match(line) 844 if not m: 845 m = sre.match(line) 846 if m: 847 name = m.group(1) 848 prev = counthash.get(name) 849 if not prev: 850 counthash[name] = linen 851 else: 852 filename = inspect.getsourcefile(module) 853 self.log.error('%s:%d: Rule %s redefined. Previously defined on line %d', filename, linen, name, prev) 854 self.error = True 855 linen += 1 856 857# ----------------------------------------------------------------------------- 858# lex(module) 859# 860# Build all of the regular expression rules from definitions in the supplied module 861# ----------------------------------------------------------------------------- 862def lex(module=None, object=None, debug=False, optimize=False, lextab='lextab', 863 reflags=int(re.VERBOSE), nowarn=False, outputdir=None, debuglog=None, errorlog=None): 864 865 if lextab is None: 866 lextab = 'lextab' 867 868 global lexer 869 870 ldict = None 871 stateinfo = {'INITIAL': 'inclusive'} 872 lexobj = Lexer() 873 lexobj.lexoptimize = optimize 874 global token, input 875 876 if errorlog is None: 877 errorlog = PlyLogger(sys.stderr) 878 879 if debug: 880 if debuglog is None: 881 debuglog = PlyLogger(sys.stderr) 882 883 # Get the module dictionary used for the lexer 884 if object: 885 module = object 886 887 # Get the module dictionary used for the parser 888 if module: 889 _items = [(k, getattr(module, k)) for k in dir(module)] 890 ldict = dict(_items) 891 # If no __file__ attribute is available, try to obtain it from the __module__ instead 892 if '__file__' not in ldict: 893 ldict['__file__'] = sys.modules[ldict['__module__']].__file__ 894 else: 895 ldict = get_caller_module_dict(2) 896 897 # Determine if the module is package of a package or not. 898 # If so, fix the tabmodule setting so that tables load correctly 899 pkg = ldict.get('__package__') 900 if pkg and isinstance(lextab, str): 901 if '.' not in lextab: 902 lextab = pkg + '.' + lextab 903 904 # Collect parser information from the dictionary 905 linfo = LexerReflect(ldict, log=errorlog, reflags=reflags) 906 linfo.get_all() 907 if not optimize: 908 if linfo.validate_all(): 909 raise SyntaxError("Can't build lexer") 910 911 if optimize and lextab: 912 try: 913 lexobj.readtab(lextab, ldict) 914 token = lexobj.token 915 input = lexobj.input 916 lexer = lexobj 917 return lexobj 918 919 except ImportError: 920 pass 921 922 # Dump some basic debugging information 923 if debug: 924 debuglog.info('lex: tokens = %r', linfo.tokens) 925 debuglog.info('lex: literals = %r', linfo.literals) 926 debuglog.info('lex: states = %r', linfo.stateinfo) 927 928 # Build a dictionary of valid token names 929 lexobj.lextokens = set() 930 for n in linfo.tokens: 931 lexobj.lextokens.add(n) 932 933 # Get literals specification 934 if isinstance(linfo.literals, (list, tuple)): 935 lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals) 936 else: 937 lexobj.lexliterals = linfo.literals 938 939 lexobj.lextokens_all = lexobj.lextokens | set(lexobj.lexliterals) 940 941 # Get the stateinfo dictionary 942 stateinfo = linfo.stateinfo 943 944 regexs = {} 945 # Build the master regular expressions 946 for state in stateinfo: 947 regex_list = [] 948 949 # Add rules defined by functions first 950 for fname, f in linfo.funcsym[state]: 951 regex_list.append('(?P<%s>%s)' % (fname, _get_regex(f))) 952 if debug: 953 debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", fname, _get_regex(f), state) 954 955 # Now add all of the simple rules 956 for name, r in linfo.strsym[state]: 957 regex_list.append('(?P<%s>%s)' % (name, r)) 958 if debug: 959 debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", name, r, state) 960 961 regexs[state] = regex_list 962 963 # Build the master regular expressions 964 965 if debug: 966 debuglog.info('lex: ==== MASTER REGEXS FOLLOW ====') 967 968 for state in regexs: 969 lexre, re_text, re_names = _form_master_re(regexs[state], reflags, ldict, linfo.toknames) 970 lexobj.lexstatere[state] = lexre 971 lexobj.lexstateretext[state] = re_text 972 lexobj.lexstaterenames[state] = re_names 973 if debug: 974 for i, text in enumerate(re_text): 975 debuglog.info("lex: state '%s' : regex[%d] = '%s'", state, i, text) 976 977 # For inclusive states, we need to add the regular expressions from the INITIAL state 978 for state, stype in stateinfo.items(): 979 if state != 'INITIAL' and stype == 'inclusive': 980 lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL']) 981 lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL']) 982 lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL']) 983 984 lexobj.lexstateinfo = stateinfo 985 lexobj.lexre = lexobj.lexstatere['INITIAL'] 986 lexobj.lexretext = lexobj.lexstateretext['INITIAL'] 987 lexobj.lexreflags = reflags 988 989 # Set up ignore variables 990 lexobj.lexstateignore = linfo.ignore 991 lexobj.lexignore = lexobj.lexstateignore.get('INITIAL', '') 992 993 # Set up error functions 994 lexobj.lexstateerrorf = linfo.errorf 995 lexobj.lexerrorf = linfo.errorf.get('INITIAL', None) 996 if not lexobj.lexerrorf: 997 errorlog.warning('No t_error rule is defined') 998 999 # Set up eof functions 1000 lexobj.lexstateeoff = linfo.eoff 1001 lexobj.lexeoff = linfo.eoff.get('INITIAL', None) 1002 1003 # Check state information for ignore and error rules 1004 for s, stype in stateinfo.items(): 1005 if stype == 'exclusive': 1006 if s not in linfo.errorf: 1007 errorlog.warning("No error rule is defined for exclusive state '%s'", s) 1008 if s not in linfo.ignore and lexobj.lexignore: 1009 errorlog.warning("No ignore rule is defined for exclusive state '%s'", s) 1010 elif stype == 'inclusive': 1011 if s not in linfo.errorf: 1012 linfo.errorf[s] = linfo.errorf.get('INITIAL', None) 1013 if s not in linfo.ignore: 1014 linfo.ignore[s] = linfo.ignore.get('INITIAL', '') 1015 1016 # Create global versions of the token() and input() functions 1017 token = lexobj.token 1018 input = lexobj.input 1019 lexer = lexobj 1020 1021 # If in optimize mode, we write the lextab 1022 if lextab and optimize: 1023 if outputdir is None: 1024 # If no output directory is set, the location of the output files 1025 # is determined according to the following rules: 1026 # - If lextab specifies a package, files go into that package directory 1027 # - Otherwise, files go in the same directory as the specifying module 1028 if isinstance(lextab, types.ModuleType): 1029 srcfile = lextab.__file__ 1030 else: 1031 if '.' not in lextab: 1032 srcfile = ldict['__file__'] 1033 else: 1034 parts = lextab.split('.') 1035 pkgname = '.'.join(parts[:-1]) 1036 exec('import %s' % pkgname) 1037 srcfile = getattr(sys.modules[pkgname], '__file__', '') 1038 outputdir = os.path.dirname(srcfile) 1039 try: 1040 lexobj.writetab(lextab, outputdir) 1041 if lextab in sys.modules: 1042 del sys.modules[lextab] 1043 except IOError as e: 1044 errorlog.warning("Couldn't write lextab module %r. %s" % (lextab, e)) 1045 1046 return lexobj 1047 1048# ----------------------------------------------------------------------------- 1049# runmain() 1050# 1051# This runs the lexer as a main program 1052# ----------------------------------------------------------------------------- 1053 1054def runmain(lexer=None, data=None): 1055 if not data: 1056 try: 1057 filename = sys.argv[1] 1058 f = open(filename) 1059 data = f.read() 1060 f.close() 1061 except IndexError: 1062 sys.stdout.write('Reading from standard input (type EOF to end):\n') 1063 data = sys.stdin.read() 1064 1065 if lexer: 1066 _input = lexer.input 1067 else: 1068 _input = input 1069 _input(data) 1070 if lexer: 1071 _token = lexer.token 1072 else: 1073 _token = token 1074 1075 while True: 1076 tok = _token() 1077 if not tok: 1078 break 1079 sys.stdout.write('(%s,%r,%d,%d)\n' % (tok.type, tok.value, tok.lineno, tok.lexpos)) 1080 1081# ----------------------------------------------------------------------------- 1082# @TOKEN(regex) 1083# 1084# This decorator function can be used to set the regex expression on a function 1085# when its docstring might need to be set in an alternative way 1086# ----------------------------------------------------------------------------- 1087 1088def TOKEN(r): 1089 def set_regex(f): 1090 if hasattr(r, '__call__'): 1091 f.regex = _get_regex(r) 1092 else: 1093 f.regex = r 1094 return f 1095 return set_regex 1096 1097# Alternative spelling of the TOKEN decorator 1098Token = TOKEN 1099