# -----------------------------------------------------------------------------
# Copyright (C) 2001-2011,
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
#   this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
# * Neither the name of David Beazley nor Dabeaz LLC may be used to
#   endorse or promote products derived from this software without
#   specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------

__version__    = "3.5"    # Version of PLY (referenced by writetab below)
__tabversion__ = "3.5"    # Version of table file used

import re, sys, types, copy, os, inspect

# This tuple contains known string types
try:
    # Python 2.6
    StringTypes = (types.StringType, types.UnicodeType)
except AttributeError:
    # Python 3.0
    StringTypes = (str, bytes)

# Extract the code attribute of a function. Different implementations
# are for Python 2/3 compatibility.

if sys.version_info[0] < 3:
    def func_code(f):
        return f.func_code
else:
    def func_code(f):
        return f.__code__

# This regular expression is used to match valid token names
_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')

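# Quick illustration (sketch, not part of PLY itself): token names must look
# like identifiers, so "NUMBER" passes the check while "bad name" fails.
def _example_identifier_check():    # hypothetical helper, illustration only
    return (bool(_is_identifier.match("NUMBER")),     # True
            bool(_is_identifier.match("bad name")))   # False
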
# Exception thrown when invalid token encountered and no default error
# handler is defined.
class LexError(Exception):
    def __init__(self,message,s):
        self.args = (message,)
        self.text = s

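# A minimal sketch (not part of the PLY API) of how a caller might handle
# LexError: the .text attribute holds the remaining unmatched input.  The
# helper name below is invented for illustration.
def _example_catch_lexerror(lexer, data):
    try:
        lexer.input(data)
        return list(iter(lexer.token, None))   # collect tokens until None
    except LexError as e:
        sys.stderr.write("Lexing failed near: %r\n" % e.text[:20])
        return []
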
# Token class. This class is used to represent the tokens produced.
class LexToken(object):
    def __str__(self):
        return "LexToken(%s,%r,%d,%d)" % (self.type,self.value,self.lineno,self.lexpos)
    def __repr__(self):
        return str(self)

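# Illustration only: rule functions receive a LexToken with exactly these
# four attributes set; the values below are invented.
def _example_token():
    tok = LexToken()
    tok.type, tok.value, tok.lineno, tok.lexpos = "NUMBER", "42", 1, 0
    return str(tok)    # -> "LexToken(NUMBER,'42',1,0)"
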
# This object is a stand-in for a logging object created by the
# logging module.

class PlyLogger(object):
    def __init__(self,f):
        self.f = f
    def critical(self,msg,*args,**kwargs):
        self.f.write((msg % args) + "\n")

    def warning(self,msg,*args,**kwargs):
        self.f.write("WARNING: " + (msg % args) + "\n")

    def error(self,msg,*args,**kwargs):
        self.f.write("ERROR: " + (msg % args) + "\n")

    info = critical
    debug = critical

# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
    def __getattribute__(self,name):
        return self
    def __call__(self,*args,**kwargs):
        return self

# -----------------------------------------------------------------------------
#                          === Lexing Engine ===
#
# The following Lexer class implements the lexer runtime. There are only
# a few public methods and attributes:
#
#    input()  -  Store a new string in the lexer
#    token()  -  Get the next token
#    clone()  -  Clone the lexer
#
#    lineno   -  Current line number
#    lexpos   -  Current position in the input string
# -----------------------------------------------------------------------------

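# A hedged usage sketch: given a built lexer (e.g. the object returned by
# lex() further below), the public API is just input() to load text and
# token() to pull LexToken objects until None.  The helper name is invented.
def _example_engine_usage(lexer, data):
    lexer.input(data)
    while True:
        tok = lexer.token()
        if not tok:
            break        # no more input
        sys.stdout.write("%s %r at line %d, pos %d\n" %
                         (tok.type, tok.value, tok.lineno, tok.lexpos))
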
class Lexer:
    def __init__(self):
        self.lexre = None             # Master regular expression. This is a list of
                                      # tuples (re,findex) where re is a compiled
                                      # regular expression and findex is a list
                                      # mapping regex group numbers to rules
        self.lexretext = None         # Current regular expression strings
        self.lexstatere = {}          # Dictionary mapping lexer states to master regexs
        self.lexstateretext = {}      # Dictionary mapping lexer states to regex strings
        self.lexstaterenames = {}     # Dictionary mapping lexer states to symbol names
        self.lexstate = "INITIAL"     # Current lexer state
        self.lexstatestack = []       # Stack of lexer states
        self.lexstateinfo = None      # State information
        self.lexstateignore = {}      # Dictionary of ignored characters for each state
        self.lexstateerrorf = {}      # Dictionary of error functions for each state
        self.lexreflags = 0           # Optional re compile flags
        self.lexdata = None           # Actual input data (as a string)
        self.lexpos = 0               # Current position in input text
        self.lexlen = 0               # Length of the input text
        self.lexerrorf = None         # Error rule (if any)
        self.lextokens = None         # List of valid tokens
        self.lexignore = ""           # Ignored characters
        self.lexliterals = ""         # Literal characters that can be passed through
        self.lexmodule = None         # Module
        self.lineno = 1               # Current line number
        self.lexoptimize = 0          # Optimized mode

    def clone(self,object=None):
        c = copy.copy(self)

        # If the object parameter has been supplied, it means we are attaching the
        # lexer to a new object. In this case, we have to rebind all methods in
        # the lexstatere and lexstateerrorf tables.

        if object:
            newtab = { }
            for key, ritem in self.lexstatere.items():
                newre = []
                for cre, findex in ritem:
                    newfindex = []
                    for f in findex:
                        if not f or not f[0]:
                            newfindex.append(f)
                            continue
                        newfindex.append((getattr(object,f[0].__name__),f[1]))
                    newre.append((cre,newfindex))
                newtab[key] = newre
            c.lexstatere = newtab
            c.lexstateerrorf = { }
            for key, ef in self.lexstateerrorf.items():
                c.lexstateerrorf[key] = getattr(object,ef.__name__)
            c.lexmodule = object
        return c

    # ------------------------------------------------------------
    # writetab() - Write lexer information to a table file
    # ------------------------------------------------------------
    def writetab(self,tabfile,outputdir=""):
        if isinstance(tabfile,types.ModuleType):
            return
        basetabfilename = tabfile.split(".")[-1]
        filename = os.path.join(outputdir,basetabfilename)+".py"
        tf = open(filename,"w")
        tf.write("# %s.py. This file automatically created by PLY (version %s). Don't edit!\n" % (tabfile,__version__))
        tf.write("_tabversion = %s\n" % repr(__tabversion__))
        tf.write("_lextokens = %s\n" % repr(self.lextokens))
        tf.write("_lexreflags = %s\n" % repr(self.lexreflags))
        tf.write("_lexliterals = %s\n" % repr(self.lexliterals))
        tf.write("_lexstateinfo = %s\n" % repr(self.lexstateinfo))

        tabre = { }
        # Collect all functions in the initial state
        initial = self.lexstatere["INITIAL"]
        initialfuncs = []
        for part in initial:
            for f in part[1]:
                if f and f[0]:
                    initialfuncs.append(f)

        for key, lre in self.lexstatere.items():
            titem = []
            for i in range(len(lre)):
                titem.append((self.lexstateretext[key][i],_funcs_to_names(lre[i][1],self.lexstaterenames[key][i])))
            tabre[key] = titem

        tf.write("_lexstatere = %s\n" % repr(tabre))
        tf.write("_lexstateignore = %s\n" % repr(self.lexstateignore))

        taberr = { }
        for key, ef in self.lexstateerrorf.items():
            if ef:
                taberr[key] = ef.__name__
            else:
                taberr[key] = None
        tf.write("_lexstateerrorf = %s\n" % repr(taberr))
        tf.close()

    # ------------------------------------------------------------
    # readtab() - Read lexer information from a tab file
    # ------------------------------------------------------------
    def readtab(self,tabfile,fdict):
        if isinstance(tabfile,types.ModuleType):
            lextab = tabfile
        else:
            if sys.version_info[0] < 3:
                exec("import %s as lextab" % tabfile)
            else:
                env = { }
                exec("import %s as lextab" % tabfile, env,env)
                lextab = env['lextab']

        if getattr(lextab,"_tabversion","0.0") != __tabversion__:
            raise ImportError("Inconsistent PLY version")

        self.lextokens = lextab._lextokens
        self.lexreflags = lextab._lexreflags
        self.lexliterals = lextab._lexliterals
        self.lexstateinfo = lextab._lexstateinfo
        self.lexstateignore = lextab._lexstateignore
        self.lexstatere = { }
        self.lexstateretext = { }
        for key,lre in lextab._lexstatere.items():
            titem = []
            txtitem = []
            for i in range(len(lre)):
                titem.append((re.compile(lre[i][0],lextab._lexreflags | re.VERBOSE),_names_to_funcs(lre[i][1],fdict)))
                txtitem.append(lre[i][0])
            self.lexstatere[key] = titem
            self.lexstateretext[key] = txtitem
        self.lexstateerrorf = { }
        for key,ef in lextab._lexstateerrorf.items():
            self.lexstateerrorf[key] = fdict[ef]
        self.begin('INITIAL')

    # ------------------------------------------------------------
    # input() - Push a new string into the lexer
    # ------------------------------------------------------------
    def input(self,s):
        # Pull off the first character to see if s looks like a string
        c = s[:1]
        if not isinstance(c,StringTypes):
            raise ValueError("Expected a string")
        self.lexdata = s
        self.lexpos = 0
        self.lexlen = len(s)

    # ------------------------------------------------------------
    # begin() - Changes the lexing state
    # ------------------------------------------------------------
    def begin(self,state):
        if not state in self.lexstatere:
            raise ValueError("Undefined state")
        self.lexre = self.lexstatere[state]
        self.lexretext = self.lexstateretext[state]
        self.lexignore = self.lexstateignore.get(state,"")
        self.lexerrorf = self.lexstateerrorf.get(state,None)
        self.lexstate = state

    # ------------------------------------------------------------
    # push_state() - Changes the lexing state and saves old on stack
    # ------------------------------------------------------------
    def push_state(self,state):
        self.lexstatestack.append(self.lexstate)
        self.begin(state)

    # ------------------------------------------------------------
    # pop_state() - Restores the previous state
    # ------------------------------------------------------------
    def pop_state(self):
        self.begin(self.lexstatestack.pop())

    # ------------------------------------------------------------
    # current_state() - Returns the current lexing state
    # ------------------------------------------------------------
    def current_state(self):
        return self.lexstate

    # ------------------------------------------------------------
    # skip() - Skip ahead n characters
    # ------------------------------------------------------------
    def skip(self,n):
        self.lexpos += n

    # ------------------------------------------------------------
    # token() - Return the next token from the Lexer
    #
    # Note: This function has been carefully implemented to be as fast
    # as possible. Don't make changes unless you really know what
    # you are doing.
    # ------------------------------------------------------------
    def token(self):
        # Make local copies of frequently referenced attributes
        lexpos = self.lexpos
        lexlen = self.lexlen
        lexignore = self.lexignore
        lexdata = self.lexdata

        while lexpos < lexlen:
            # This code provides some short-circuit code for whitespace, tabs, and other ignored characters
            if lexdata[lexpos] in lexignore:
                lexpos += 1
                continue

            # Look for a regular expression match
            for lexre,lexindexfunc in self.lexre:
                m = lexre.match(lexdata,lexpos)
                if not m: continue

                # Create a token for return
                tok = LexToken()
                tok.value = m.group()
                tok.lineno = self.lineno
                tok.lexpos = lexpos

                i = m.lastindex
                func,tok.type = lexindexfunc[i]

                if not func:
                    # If no token type was set, it's an ignored token
                    if tok.type:
                        self.lexpos = m.end()
                        return tok
                    else:
                        lexpos = m.end()
                        break

                lexpos = m.end()

                # If token is processed by a function, call it
                tok.lexer = self      # Set additional attributes useful in token rules
                self.lexmatch = m
                self.lexpos = lexpos

                newtok = func(tok)

                # Every function must return a token, if nothing, we just move to next token
                if not newtok:
                    lexpos = self.lexpos        # This is here in case user has updated lexpos.
                    lexignore = self.lexignore  # This is here in case there was a state change
                    break

                # Verify type of the token. If not in the token map, raise an error
                if not self.lexoptimize:
                    if not newtok.type in self.lextokens:
                        raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
                            func_code(func).co_filename, func_code(func).co_firstlineno,
                            func.__name__, newtok.type),lexdata[lexpos:])

                return newtok
            else:
                # No match, see if in literals
                if lexdata[lexpos] in self.lexliterals:
                    tok = LexToken()
                    tok.value = lexdata[lexpos]
                    tok.lineno = self.lineno
                    tok.type = tok.value
                    tok.lexpos = lexpos
                    self.lexpos = lexpos + 1
                    return tok

                # No match. Call t_error() if defined.
                if self.lexerrorf:
                    tok = LexToken()
                    tok.value = self.lexdata[lexpos:]
                    tok.lineno = self.lineno
                    tok.type = "error"
                    tok.lexer = self
                    tok.lexpos = lexpos
                    self.lexpos = lexpos
                    newtok = self.lexerrorf(tok)
                    if lexpos == self.lexpos:
                        # Error method didn't change text position at all. This is an error.
                        raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
                    lexpos = self.lexpos
                    if not newtok: continue
                    return newtok

                self.lexpos = lexpos
                raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos],lexpos), lexdata[lexpos:])

        self.lexpos = lexpos + 1
        if self.lexdata is None:
            raise RuntimeError("No input string given with input()")
        return None

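# A hedged sketch of conditional lexing (rule and state names invented): rule
# functions may switch states through tok.lexer using begin(), push_state(),
# and pop_state(), e.g. to lex the interior of a comment with its own rules.
def _example_state_switching():    # illustrative only, not part of the PLY API
    def t_ccomment(t):
        r'/\*'
        t.lexer.push_state('ccode')    # enter the exclusive 'ccode' state

    def t_ccode_end(t):
        r'\*/'
        t.lexer.pop_state()            # return to whatever state was active
    return t_ccomment, t_ccode_end
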
# -----------------------------------------------------------------------------
#                          === Lex Builder ===
#
# The functions and classes below are used to collect lexing information
# and build a Lexer object from it.
# -----------------------------------------------------------------------------

# -----------------------------------------------------------------------------
# _get_regex(func)
#
# Returns the regular expression assigned to a function either as a doc string
# or as a .regex attribute attached by the @TOKEN decorator.
# -----------------------------------------------------------------------------
def _get_regex(func):
    return getattr(func,"regex",func.__doc__)

# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the lex() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
    try:
        raise RuntimeError
    except RuntimeError:
        e,b,t = sys.exc_info()
        f = t.tb_frame
        while levels > 0:
            f = f.f_back
            levels -= 1
        ldict = f.f_globals.copy()
        if f.f_globals != f.f_locals:
            ldict.update(f.f_locals)
        return ldict

# -----------------------------------------------------------------------------
# _funcs_to_names()
#
# Given a list of regular expression functions, this converts it to a list
# suitable for output to a table file
# -----------------------------------------------------------------------------
def _funcs_to_names(funclist,namelist):
    result = []
    for f,name in zip(funclist,namelist):
        if f and f[0]:
            result.append((name, f[1]))
        else:
            result.append(f)
    return result

# -----------------------------------------------------------------------------
# _names_to_funcs()
#
# Given a list of regular expression function names, this converts it back to
# functions.
# -----------------------------------------------------------------------------
def _names_to_funcs(namelist,fdict):
    result = []
    for n in namelist:
        if n and n[0]:
            result.append((fdict[n[0]],n[1]))
        else:
            result.append(n)
    return result

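# Round-trip sketch (names invented): writetab() stores rules by name via
# _funcs_to_names(), and readtab() rebinds them with _names_to_funcs().
def _example_name_roundtrip():
    def t_NUM(t): return t                                 # stand-in rule function
    names = _funcs_to_names([(t_NUM,'NUM')], ['t_NUM'])    # -> [('t_NUM','NUM')]
    return _names_to_funcs(names, {'t_NUM': t_NUM})        # -> [(t_NUM,'NUM')]
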
# -----------------------------------------------------------------------------
# _form_master_re()
#
# This function takes a list of all of the regex components and attempts to
# form the master regular expression. Given limitations in the Python re
# module, it may be necessary to break the master regex into separate expressions.
# -----------------------------------------------------------------------------
def _form_master_re(relist,reflags,ldict,toknames):
    if not relist: return []
    regex = "|".join(relist)
    try:
        lexre = re.compile(regex,re.VERBOSE | reflags)

        # Build the index to function map for the matching engine
        lexindexfunc = [ None ] * (max(lexre.groupindex.values())+1)
        lexindexnames = lexindexfunc[:]

        for f,i in lexre.groupindex.items():
            handle = ldict.get(f,None)
            if type(handle) in (types.FunctionType, types.MethodType):
                lexindexfunc[i] = (handle,toknames[f])
                lexindexnames[i] = f
            elif handle is not None:
                lexindexnames[i] = f
                if f.find("ignore_") > 0:
                    lexindexfunc[i] = (None,None)
                else:
                    lexindexfunc[i] = (None, toknames[f])

        return [(lexre,lexindexfunc)],[regex],[lexindexnames]
    except Exception:
        m = int(len(relist)/2)
        if m == 0: m = 1
        llist, lre, lnames = _form_master_re(relist[:m],reflags,ldict,toknames)
        rlist, rre, rnames = _form_master_re(relist[m:],reflags,ldict,toknames)
        return llist+rlist, lre+rre, lnames+rnames

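# Sketch of what _form_master_re() builds (rule names invented): each rule
# becomes a named group in one big alternation, and the group index of a
# match identifies the rule that fired.
def _example_master_re():
    parts = [r'(?P<t_NUMBER>\d+)', r'(?P<t_PLUS>\+)']
    master = re.compile("|".join(parts), re.VERBOSE)
    m = master.match("42")
    return m.lastindex, m.group()    # -> (1, '42'): group 1 is t_NUMBER
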
# -----------------------------------------------------------------------------
# _statetoken(s,names)
#
# Given a declaration name s of the form "t_" and a dictionary whose keys are
# state names, this function returns a tuple (states,tokenname) where states
# is a tuple of state names and tokenname is the name of the token. For example,
# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
# -----------------------------------------------------------------------------
def _statetoken(s,names):
    parts = s.split("_")
    for i in range(1,len(parts)):
        if not parts[i] in names and parts[i] != 'ANY': break
    if i > 1:
        states = tuple(parts[1:i])
    else:
        states = ('INITIAL',)

    if 'ANY' in states:
        states = tuple(names)

    tokenname = "_".join(parts[i:])
    return (states,tokenname)

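# Worked examples (assuming states 'foo' and 'bar' have been declared):
def _example_statetoken():
    names = {'INITIAL':'inclusive', 'foo':'exclusive', 'bar':'exclusive'}
    a = _statetoken("t_foo_bar_SPAM", names)   # -> (('foo','bar'), 'SPAM')
    b = _statetoken("t_NUMBER", names)         # -> (('INITIAL',), 'NUMBER')
    return a, b
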
# -----------------------------------------------------------------------------
# LexerReflect()
#
# This class represents information needed to build a lexer as extracted from a
# user's input file.
# -----------------------------------------------------------------------------
class LexerReflect(object):
    def __init__(self,ldict,log=None,reflags=0):
        self.ldict      = ldict
        self.error_func = None
        self.tokens     = []
        self.reflags    = reflags
        self.stateinfo  = { 'INITIAL' : 'inclusive'}
        self.modules    = {}
        self.error      = 0

        if log is None:
            self.log = PlyLogger(sys.stderr)
        else:
            self.log = log

    # Get all of the basic information
    def get_all(self):
        self.get_tokens()
        self.get_literals()
        self.get_states()
        self.get_rules()

    # Validate all of the information
    def validate_all(self):
        self.validate_tokens()
        self.validate_literals()
        self.validate_rules()
        return self.error

    # Get the tokens map
    def get_tokens(self):
        tokens = self.ldict.get("tokens",None)
        if not tokens:
            self.log.error("No token list is defined")
            self.error = 1
            return
        if not isinstance(tokens,(list, tuple)):
            self.log.error("tokens must be a list or tuple")
            self.error = 1
            return
        if not tokens:
            self.log.error("tokens is empty")
            self.error = 1
            return
        self.tokens = tokens

    # Validate the tokens
    def validate_tokens(self):
        terminals = {}
        for n in self.tokens:
            if not _is_identifier.match(n):
                self.log.error("Bad token name '%s'",n)
                self.error = 1
            if n in terminals:
                self.log.warning("Token '%s' multiply defined", n)
            terminals[n] = 1

    # Get the literals specifier
    def get_literals(self):
        self.literals = self.ldict.get("literals","")
        if not self.literals:
            self.literals = ""

    # Validate the literals
    def validate_literals(self):
        try:
            for c in self.literals:
                if not isinstance(c,StringTypes) or len(c) > 1:
                    self.log.error("Invalid literal %s. Must be a single character", repr(c))
                    self.error = 1
        except TypeError:
            self.log.error("Invalid literals specification. literals must be a sequence of characters")
            self.error = 1

    # Get the state information
    def get_states(self):
        self.states = self.ldict.get("states",None)
        # Build statemap
        if self.states:
            if not isinstance(self.states,(tuple,list)):
                self.log.error("states must be defined as a tuple or list")
                self.error = 1
            else:
                for s in self.states:
                    if not isinstance(s,tuple) or len(s) != 2:
                        self.log.error("Invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')",repr(s))
                        self.error = 1
                        continue
                    name, statetype = s
                    if not isinstance(name,StringTypes):
                        self.log.error("State name %s must be a string", repr(name))
                        self.error = 1
                        continue
                    if not (statetype == 'inclusive' or statetype == 'exclusive'):
                        self.log.error("State type for state %s must be 'inclusive' or 'exclusive'",name)
                        self.error = 1
                        continue
                    if name in self.stateinfo:
                        self.log.error("State '%s' already defined",name)
                        self.error = 1
                        continue
                    self.stateinfo[name] = statetype

    # Get all of the symbols with a t_ prefix and sort them into various
    # categories (functions, strings, error functions, and ignore characters)
    def get_rules(self):
        tsymbols = [f for f in self.ldict if f[:2] == 't_' ]

        # Now build up a list of functions and a list of strings
        self.toknames = { }    # Mapping of symbols to token names
        self.funcsym  = { }    # Symbols defined as functions
        self.strsym   = { }    # Symbols defined as strings
        self.ignore   = { }    # Ignore strings by state
        self.errorf   = { }    # Error functions by state

        for s in self.stateinfo:
            self.funcsym[s] = []
            self.strsym[s] = []

        if len(tsymbols) == 0:
            self.log.error("No rules of the form t_rulename are defined")
            self.error = 1
            return

        for f in tsymbols:
            t = self.ldict[f]
            states, tokname = _statetoken(f,self.stateinfo)
            self.toknames[f] = tokname

            if hasattr(t,"__call__"):
                if tokname == 'error':
                    for s in states:
                        self.errorf[s] = t
                elif tokname == 'ignore':
                    line = func_code(t).co_firstlineno
                    file = func_code(t).co_filename
                    self.log.error("%s:%d: Rule '%s' must be defined as a string",file,line,t.__name__)
                    self.error = 1
                else:
                    for s in states:
                        self.funcsym[s].append((f,t))
            elif isinstance(t, StringTypes):
                if tokname == 'ignore':
                    for s in states:
                        self.ignore[s] = t
                    if "\\" in t:
                        self.log.warning("%s contains a literal backslash '\\'",f)
                elif tokname == 'error':
                    self.log.error("Rule '%s' must be defined as a function", f)
                    self.error = 1
                else:
                    for s in states:
                        self.strsym[s].append((f,t))
            else:
                self.log.error("%s not defined as a function or string", f)
                self.error = 1

        # Sort the functions by line number
        for f in self.funcsym.values():
            if sys.version_info[0] < 3:
                f.sort(lambda x,y: cmp(func_code(x[1]).co_firstlineno,func_code(y[1]).co_firstlineno))
            else:
                # Python 3.0
                f.sort(key=lambda x: func_code(x[1]).co_firstlineno)

        # Sort the strings by regular expression length
        for s in self.strsym.values():
            if sys.version_info[0] < 3:
                s.sort(lambda x,y: (len(x[1]) < len(y[1])) - (len(x[1]) > len(y[1])))
            else:
                # Python 3.0
                s.sort(key=lambda x: len(x[1]),reverse=True)

    # Validate all of the t_rules collected
    def validate_rules(self):
        for state in self.stateinfo:
            # Validate all rules defined by functions
            for fname, f in self.funcsym[state]:
                line = func_code(f).co_firstlineno
                file = func_code(f).co_filename
                module = inspect.getmodule(f)
                self.modules[module] = 1

                tokname = self.toknames[fname]
                if isinstance(f, types.MethodType):
                    reqargs = 2
                else:
                    reqargs = 1
                nargs = func_code(f).co_argcount
                if nargs > reqargs:
                    self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,f.__name__)
                    self.error = 1
                    continue

                if nargs < reqargs:
                    self.log.error("%s:%d: Rule '%s' requires an argument", file,line,f.__name__)
                    self.error = 1
                    continue

                if not _get_regex(f):
                    self.log.error("%s:%d: No regular expression defined for rule '%s'",file,line,f.__name__)
                    self.error = 1
                    continue

                try:
                    c = re.compile("(?P<%s>%s)" % (fname, _get_regex(f)), re.VERBOSE | self.reflags)
                    if c.match(""):
                        self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file,line,f.__name__)
                        self.error = 1
                except re.error:
                    _etype, e, _etrace = sys.exc_info()
                    self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file,line,f.__name__,e)
                    if '#' in _get_regex(f):
                        self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'",file,line, f.__name__)
                    self.error = 1

            # Validate all rules defined by strings
            for name,r in self.strsym[state]:
                tokname = self.toknames[name]
                if tokname == 'error':
                    self.log.error("Rule '%s' must be defined as a function", name)
                    self.error = 1
                    continue

                if not tokname in self.tokens and tokname.find("ignore_") < 0:
                    self.log.error("Rule '%s' defined for an unspecified token %s",name,tokname)
                    self.error = 1
                    continue

                try:
                    c = re.compile("(?P<%s>%s)" % (name,r),re.VERBOSE | self.reflags)
                    if c.match(""):
                        self.log.error("Regular expression for rule '%s' matches empty string",name)
                        self.error = 1
                except re.error:
                    _etype, e, _etrace = sys.exc_info()
                    self.log.error("Invalid regular expression for rule '%s'. %s",name,e)
                    if '#' in r:
                        self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'",name)
                    self.error = 1

            if not self.funcsym[state] and not self.strsym[state]:
                self.log.error("No rules defined for state '%s'",state)
                self.error = 1

            # Validate the error function
            efunc = self.errorf.get(state,None)
            if efunc:
                f = efunc
                line = func_code(f).co_firstlineno
                file = func_code(f).co_filename
                module = inspect.getmodule(f)
                self.modules[module] = 1

                if isinstance(f, types.MethodType):
                    reqargs = 2
                else:
                    reqargs = 1
                nargs = func_code(f).co_argcount
                if nargs > reqargs:
                    self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,f.__name__)
                    self.error = 1

                if nargs < reqargs:
                    self.log.error("%s:%d: Rule '%s' requires an argument", file,line,f.__name__)
                    self.error = 1

        for module in self.modules:
            self.validate_module(module)

    # -----------------------------------------------------------------------------
    # validate_module()
    #
    # This checks to see if there are duplicated t_rulename() functions or strings
    # in the lexer input file. This is done using a simple regular expression
    # match on each line in the source code of the given module.
    # -----------------------------------------------------------------------------
    def validate_module(self, module):
        lines, linen = inspect.getsourcelines(module)
        fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
        sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')
        counthash = { }
        linen += 1
        for l in lines:
            m = fre.match(l)
            if not m:
                m = sre.match(l)
            if m:
                name = m.group(1)
                prev = counthash.get(name)
                if not prev:
                    counthash[name] = linen
                else:
                    filename = inspect.getsourcefile(module)
                    self.log.error("%s:%d: Rule %s redefined. Previously defined on line %d",filename,linen,name,prev)
                    self.error = 1
            linen += 1

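# A hedged sketch (all names and patterns invented) of the module-level
# declarations LexerReflect collects: a tokens list, optional literals and
# states, string and function t_ rules, plus t_ignore and t_error.
def _example_lexer_spec():     # illustrative only; in real use these live at
    tokens = ('NUMBER','PLUS') # the top level of the user's own module
    literals = "+-*/"
    states = (('ccode','exclusive'),)
    t_PLUS = r'\+'             # simple rules are strings
    t_ignore = " \t"           # ignored characters
    def t_NUMBER(t):           # complex rules are functions
        r'\d+'
        t.value = int(t.value)
        return t
    def t_error(t):            # called on illegal characters
        t.lexer.skip(1)
    return locals()
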
# -----------------------------------------------------------------------------
# lex(module)
#
# Build all of the regular expression rules from definitions in the supplied module
# -----------------------------------------------------------------------------
def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,nowarn=0,outputdir="", debuglog=None, errorlog=None):
    global lexer
    ldict = None
    stateinfo = { 'INITIAL' : 'inclusive'}
    lexobj = Lexer()
    lexobj.lexoptimize = optimize
    global token,input

    if errorlog is None:
        errorlog = PlyLogger(sys.stderr)

    if debug:
        if debuglog is None:
            debuglog = PlyLogger(sys.stderr)

    # Get the module dictionary used for the lexer
    if object: module = object

    if module:
        _items = [(k,getattr(module,k)) for k in dir(module)]
        ldict = dict(_items)
    else:
        ldict = get_caller_module_dict(2)

    # Collect lexer information from the dictionary
    linfo = LexerReflect(ldict,log=errorlog,reflags=reflags)
    linfo.get_all()
    if not optimize:
        if linfo.validate_all():
            raise SyntaxError("Can't build lexer")

    if optimize and lextab:
        try:
            lexobj.readtab(lextab,ldict)
            token = lexobj.token
            input = lexobj.input
            lexer = lexobj
            return lexobj
        except ImportError:
            pass

    # Dump some basic debugging information
    if debug:
        debuglog.info("lex: tokens   = %r", linfo.tokens)
        debuglog.info("lex: literals = %r", linfo.literals)
        debuglog.info("lex: states   = %r", linfo.stateinfo)

    # Build a dictionary of valid token names
    lexobj.lextokens = { }
    for n in linfo.tokens:
        lexobj.lextokens[n] = 1

    # Get literals specification
    if isinstance(linfo.literals,(list,tuple)):
        lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals)
    else:
        lexobj.lexliterals = linfo.literals

    # Get the stateinfo dictionary
    stateinfo = linfo.stateinfo

    regexs = { }
    # Build the master regular expressions
    for state in stateinfo:
        regex_list = []

        # Add rules defined by functions first
        for fname, f in linfo.funcsym[state]:
            line = func_code(f).co_firstlineno
            file = func_code(f).co_filename
            regex_list.append("(?P<%s>%s)" % (fname,_get_regex(f)))
            if debug:
                debuglog.info("lex: Adding rule %s -> '%s' (state '%s')",fname,_get_regex(f), state)

        # Now add all of the simple rules
        for name,r in linfo.strsym[state]:
            regex_list.append("(?P<%s>%s)" % (name,r))
            if debug:
                debuglog.info("lex: Adding rule %s -> '%s' (state '%s')",name,r, state)

        regexs[state] = regex_list

    # Build the master regular expressions
    if debug:
        debuglog.info("lex: ==== MASTER REGEXS FOLLOW ====")

    for state in regexs:
        lexre, re_text, re_names = _form_master_re(regexs[state],reflags,ldict,linfo.toknames)
        lexobj.lexstatere[state] = lexre
        lexobj.lexstateretext[state] = re_text
        lexobj.lexstaterenames[state] = re_names
        if debug:
            for i in range(len(re_text)):
                debuglog.info("lex: state '%s' : regex[%d] = '%s'",state, i, re_text[i])

    # For inclusive states, we need to add the regular expressions from the INITIAL state
    for state,stype in stateinfo.items():
        if state != "INITIAL" and stype == 'inclusive':
            lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
            lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
            lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])

    lexobj.lexstateinfo = stateinfo
    lexobj.lexre = lexobj.lexstatere["INITIAL"]
    lexobj.lexretext = lexobj.lexstateretext["INITIAL"]
    lexobj.lexreflags = reflags

    # Set up ignore variables
    lexobj.lexstateignore = linfo.ignore
    lexobj.lexignore = lexobj.lexstateignore.get("INITIAL","")

    # Set up error functions
    lexobj.lexstateerrorf = linfo.errorf
    lexobj.lexerrorf = linfo.errorf.get("INITIAL",None)
    if not lexobj.lexerrorf:
        errorlog.warning("No t_error rule is defined")

    # Check state information for ignore and error rules
    for s,stype in stateinfo.items():
        if stype == 'exclusive':
            if not s in linfo.errorf:
                errorlog.warning("No error rule is defined for exclusive state '%s'", s)
            if not s in linfo.ignore and lexobj.lexignore:
                errorlog.warning("No ignore rule is defined for exclusive state '%s'", s)
        elif stype == 'inclusive':
            if not s in linfo.errorf:
                linfo.errorf[s] = linfo.errorf.get("INITIAL",None)
            if not s in linfo.ignore:
                linfo.ignore[s] = linfo.ignore.get("INITIAL","")

    # Create global versions of the token() and input() functions
    token = lexobj.token
    input = lexobj.input
    lexer = lexobj

    # If in optimize mode, we write the lextab
    if lextab and optimize:
        lexobj.writetab(lextab,outputdir)

    return lexobj

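# Typical use (sketch; the rules come from the caller's own module): build the
# lexer, then feed it text.  With optimize=1 and a lextab name, the table file
# written above is reused on the next run instead of re-validating the rules.
def _example_build_and_run(data):    # illustrative only, not part of the PLY API
    lexobj = lex()                   # reflect t_ rules out of the calling module
    lexobj.input(data)
    while True:
        tok = lexobj.token()
        if not tok:
            break
        sys.stdout.write("%s\n" % tok)
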
# -----------------------------------------------------------------------------
# runmain()
#
# This runs the lexer as a main program
# -----------------------------------------------------------------------------
def runmain(lexer=None,data=None):
    if not data:
        try:
            filename = sys.argv[1]
            data = open(filename).read()
        except IndexError:
            sys.stdout.write("Reading from standard input (type EOF to end):\n")
            data = sys.stdin.read()

    _input = lexer.input if lexer else input
    _input(data)
    _token = lexer.token if lexer else token

    while 1:
        tok = _token()
        if not tok: break
        sys.stdout.write("(%s,%r,%d,%d)\n" % (tok.type, tok.value, tok.lineno,tok.lexpos))

# -----------------------------------------------------------------------------
# @TOKEN(regex)
#
# This decorator function can be used to set the regex expression on a function
# when its docstring might need to be set in an alternative way
# -----------------------------------------------------------------------------
def TOKEN(r):
    def set_regex(f):
        if hasattr(r,"__call__"):
            f.regex = _get_regex(r)
        else:
            f.regex = r
        return f
    return set_regex

# Alternative spelling of the TOKEN decorator
Token = TOKEN

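# Example use of @TOKEN (sketch; 'digit' and t_NUMBER are invented names): the
# regex is computed at definition time instead of written in the docstring.
def _example_TOKEN():
    digit = r'[0-9]'
    @TOKEN(digit + r'+')
    def t_NUMBER(t):
        t.value = int(t.value)
        return t
    return t_NUMBER.regex    # -> '[0-9]+'
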