# # DO NOT EDIT THIS FILE! # # Parser generated by BisonGen on Wed May 30 05:11:11 2001. # # token definitions AND = 257 OR = 258 EQ = 259 NE = 260 LT = 261 LE = 262 GT = 263 GE = 264 LSH = 265 RSH = 266 INTEGER = 267 IDENTIFIER = 268 DEFINED = 269 # vector mapping lexer token numbers into internal token numbers token_translations = [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 24, 2, 2, 2, 23, 18, 2, 26, 27, 21, 19, 28, 20, 2, 22, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 17, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 16, 2, 25, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] YYTRANSLATE = lambda x: x > 269 and 44 or token_translations[x] # vector of items of all rules. 
# Right-hand-side symbol lists for each grammar rule, indexed by rule
# number (rule 0 is unused -> None).  Numbers are internal symbol numbers;
# see token_names for what each number means.
rhs_tokens = [None,
    [30],               #  1: const_expr: expr
    [31],               #  2: expr: logical_or_expr
    [32],               #  3: logical_or_expr: logical_and_expr
    [31, 4, 32],        #  4: logical_or_expr OR logical_and_expr
    [33],               #  5: logical_and_expr: bitwise_or_expr
    [32, 3, 33],        #  6: logical_and_expr AND bitwise_or_expr
    [34],               #  7: bitwise_or_expr: bitwise_xor_expr
    [33, 16, 34],       #  8: bitwise_or_expr '|' bitwise_xor_expr
    [35],               #  9: bitwise_xor_expr: bitwise_and_expr
    [34, 17, 35],       # 10: bitwise_xor_expr '^' bitwise_and_expr
    [36],               # 11: bitwise_and_expr: equality_expr
    [35, 18, 36],       # 12: bitwise_and_expr '&' equality_expr
    [37],               # 13: equality_expr: relational_expr
    [36, 5, 37],        # 14: equality_expr EQ relational_expr
    [36, 6, 37],        # 15: equality_expr NE relational_expr
    [38],               # 16: relational_expr: shift_expr
    [37, 7, 38],        # 17: relational_expr LT shift_expr
    [37, 8, 38],        # 18: relational_expr LE shift_expr
    [37, 9, 38],        # 19: relational_expr GT shift_expr
    [37, 10, 38],       # 20: relational_expr GE shift_expr
    [39],               # 21: shift_expr: additive_expr
    [38, 11, 39],       # 22: shift_expr LSH additive_expr
    [38, 12, 39],       # 23: shift_expr RSH additive_expr
    [40],               # 24: additive_expr: multiplicative_expr
    [39, 19, 40],       # 25: additive_expr '+' multiplicative_expr
    [39, 20, 40],       # 26: additive_expr '-' multiplicative_expr
    [41],               # 27: multiplicative_expr: unary_expr
    [40, 21, 41],       # 28: multiplicative_expr '*' unary_expr
    [40, 22, 41],       # 29: multiplicative_expr '/' unary_expr
    [40, 23, 41],       # 30: multiplicative_expr '%' unary_expr
    [42],               # 31: unary_expr: primary_expr
    [20, 42],           # 32: '-' primary_expr
    [19, 42],           # 33: '+' primary_expr
    [24, 42],           # 34: '!' primary_expr
    [25, 42],           # 35: '~' primary_expr
    [14],               # 36: primary_expr: IDENTIFIER
    [13],               # 37: primary_expr: INTEGER
    [15, 26, 14, 27],   # 38: DEFINED '(' IDENTIFIER ')'
    [15, 14],           # 39: DEFINED IDENTIFIER
    [14, 26, 43, 27],   # 40: IDENTIFIER '(' arguments ')'
    [26, 30, 27],       # 41: '(' expr ')'
    [30],               # 42: arguments: expr
    [43, 28, 30],       # 43: arguments ',' expr
    ]

# vector of line numbers and filename of all rules
# Source location (in the BisonGen grammar file) of each rule, for
# debug/trace output (see Parser.print_reduce).
rule_info = [": line 0",
    "ConstExpr.bgen: line 25",
    "ConstExpr.bgen: line 32",
    "ConstExpr.bgen: line 39",
    "ConstExpr.bgen: line 42",
    "ConstExpr.bgen: line 54",
    "ConstExpr.bgen: line 57",
    "ConstExpr.bgen: line 69",
    "ConstExpr.bgen: line 72",
    "ConstExpr.bgen: line 84",
    "ConstExpr.bgen: line 87",
    "ConstExpr.bgen: line 99",
    "ConstExpr.bgen: line 102",
    "ConstExpr.bgen: line 114",
    "ConstExpr.bgen: line 117",
    "ConstExpr.bgen: line 125",
    "ConstExpr.bgen: line 137",
    "ConstExpr.bgen: line 140",
    "ConstExpr.bgen: line 148",
    "ConstExpr.bgen: line 156",
    "ConstExpr.bgen: line 164",
    "ConstExpr.bgen: line 176",
    "ConstExpr.bgen: line 179",
    "ConstExpr.bgen: line 187",
    "ConstExpr.bgen: line 199",
    "ConstExpr.bgen: line 202",
    "ConstExpr.bgen: line 210",
    "ConstExpr.bgen: line 222",
    "ConstExpr.bgen: line 225",
    "ConstExpr.bgen: line 233",
    "ConstExpr.bgen: line 241",
    "ConstExpr.bgen: line 253",
    "ConstExpr.bgen: line 256",
    "ConstExpr.bgen: line 263",
    "ConstExpr.bgen: line 270",
    "ConstExpr.bgen: line 277",
    "ConstExpr.bgen: line 288",
    "ConstExpr.bgen: line 294",
    "ConstExpr.bgen: line 300",
    "ConstExpr.bgen: line 309",
    "ConstExpr.bgen: line 316",
    "ConstExpr.bgen: line 326",
    "ConstExpr.bgen: line 338",
    "ConstExpr.bgen: line 341",
    ]

# vector of string-names indexed by token number
# Internal symbol number -> printable name (terminals first, then
# non-terminals).  Used by error reporting and debug tracing.
token_names = ['$', 'error', '$undefined.', 'AND', 'OR', 'EQ', 'NE', 'LT',
    'LE', 'GT', 'GE', 'LSH', 'RSH', 'INTEGER', 'IDENTIFIER', 'DEFINED',
    "'|'", "'^'", "'&'", "'+'", "'-'", "'*'", "'/'", "'%'",
    "'!'", "'~'", "'('", "')'", "','",
    'const_expr', 'expr', 'logical_or_expr', 'logical_and_expr',
    'bitwise_or_expr', 'bitwise_xor_expr', 'bitwise_and_expr',
    'equality_expr', 'relational_expr', 'shift_expr', 'additive_expr',
    'multiplicative_expr', 'unary_expr', 'primary_expr', 'arguments',
    0, ]

# symbol number of symbol that rule derives.
# i.e. the left-hand-side non-terminal of each rule, indexed by rule number.
derives = [0, 29, 30, 31, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36,
    37, 37, 37, 37, 37, 38, 38, 38, 39, 39, 39, 40, 40, 40, 40,
    41, 41, 41, 41, 41, 42, 42, 42, 42, 42, 42, 43, 43]

# number of symbols composing right hand side of rule.
# Parallel to rhs_tokens: len(rhs_tokens[n]) for each rule n.
rhs_size = [0, 1, 1, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 3, 1, 3, 3, 3, 3,
    1, 3, 3, 1, 3, 3, 1, 3, 3, 3, 1, 2, 2, 2, 2, 1, 1, 4, 2, 4, 3, 1, 3]

# default rule to reduce with in state.  0 means the default is an error.
# indexed by state number
default_action = [
    0, 37, 36, 0, 0, 0, 0, 0, 0, 1,          #  0- 9
    2, 3, 5, 7, 9, 11, 13, 16, 21, 24,       # 10-19
    27, 31, 0, 39, 0, 33, 32, 34, 35, 0,     # 20-29
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0,            # 30-39
    0, 0, 0, 0, 0, 0, 0, 0, 42, 0,           # 40-49
    0, 41, 4, 6, 8, 10, 12, 14, 15, 17,      # 50-59
    18, 19, 20, 22, 23, 25, 26, 28, 29, 30,  # 60-69
    40, 0, 38, 43, 0, 0, 0]                  # 70-76

# default state to go to after a reduction of a rule.
# indexed by variable number (lhs token)
default_goto = [74, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 49]

# index in yytable of the portion describing state (indexed by state number)
# If the value in yytable is positive, we shift the token and go to that state.
# If the value is negative, it is minus a rule number to reduce by.
# If the value is zero, the default action from yydefact[s] is used.
action_idx = [-9, -32768, -14, -13, -6, -6, -6, -6, 8, -32768, 43, 53, 44, 42, 46, -3, 28, 7, 29, 3, -32768, -32768, 8, -32768, 47, -32768, -32768, -32768, -32768, 35, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, -32768, 23, 38, -32768, 53, 44, 42, 46, -3, 28, 28, 7, 7, 7, 7, 29, 29, 3, 3, -32768, -32768, -32768, -32768, 8, -32768, -32768, 66, 67, -32768] # the index in yytable of the portion describing what to do after reducing a rule. # The value from yytable is the state to go to. goto_idx = [-32768, -8, -32768, 40, 37, 41, 36, 45, 17, 2, 13, 14, -16, 39, -32768] # a vector filled with portions for different uses. (using action_idx and goto_idx) yytable = [29, 23, 35, 36, 1, 2, 3, 1, 2, 3, 4, 5, 22, 24, 48, 6, 7, 8, 41, 42, 8, 1, 2, 3, 45, 46, 47, 4, 5, 67, 68, 69, 6, 7, 8, 37, 38, 39, 40, 59, 60, 61, 62, 25, 26, 27, 28, 30, 43, 44, 70, 71, 57, 58, 63, 64, 31, 65, 66, 33, 32, 50, 51, 73, 34, 72, 75, 76, 53, 55, 52, 0, 0, 54, 0, 0, 0, 0, 0, 56] # a vector indexed in parallel with yytable. # It indicates the bounds of the portion you are trying to examine. 
# Guard table parallel to yytable: a yytable entry at index i is only
# valid if yycheck[i] equals the internal token (or state) being looked
# up; otherwise the state's default action applies.  -1 entries are
# padding.
yycheck = [
    8, 14, 5, 6, 13, 14, 15, 13, 14, 15,      #  0- 9
    19, 20, 26, 26, 22, 24, 25, 26, 11, 12,   # 10-19
    26, 13, 14, 15, 21, 22, 23, 19, 20, 45,   # 20-29
    46, 47, 24, 25, 26, 7, 8, 9, 10, 37,      # 30-39
    38, 39, 40, 4, 5, 6, 7, 4, 19, 20,        # 40-49
    27, 28, 35, 36, 41, 42, 3, 43, 44, 17,    # 50-59
    16, 14, 27, 71, 18, 27, 0, 0, 31, 33,     # 60-69
    30, -1, -1, 32, -1, -1, -1, -1, -1, 34]   # 70-79

YYLAST = 79        # highest valid index into yytable/yycheck
YYFINAL = 76       # the accepting state
YYFLAG = -32768    # sentinel: "use the default action" / table hole
YYNTBASE = 29      # first non-terminal symbol number

##if debug
##endif

# Static definitions
YYEMPTY = -2       # "no lookahead token read yet"
YYEOF = 0          # end-of-input token
YYINITDEPTH = 1000 # fixed size of the state/value stacks
LEXER_FUNCTIONS = 0

class Parser:
    """Table-driven LALR parser for C-preprocessor constant expressions.

    NOTE(review): the action routines reference self.defines (a dict of
    preprocessor macro definitions), but __init__ does not create it --
    presumably the caller assigns parser.defines before calling parse();
    confirm against callers.
    """

    def __init__(self, verbose=0):
        # verbose: non-zero enables debug tracing (see debug_mode).
        self.verbose = verbose

    def debug_mode(self, flag=None):
        """Get (flag is None) or set the verbose/debug flag.

        Raises TypeError if flag is not an integer.
        """
        if flag is None:
            return self.verbose
        if type(flag) != type(1):
            raise TypeError('an integer is required')
        self.verbose = flag
        return flag

    def parse(self, text):
        """Tokenize and parse `text`, returning the computed value of the
        expression (via the action routines), or None/1 on a parse error.

        NOTE(review): the first error path returns 1 while the others
        return None -- generator inconsistency; callers should treat any
        non-expression result as failure.
        """
        state_stack = [0]*YYINITDEPTH
        value_stack = [0]*YYINITDEPTH
        ##if debug
        ##endif
        # Inlined lexer state: position, end, current start condition.
        lexer_pos = 0
        lexer_end = len(text)
        lexer_state = INITIAL
        lexer_last = 0
        yylval = ''
        yystate = 0
        yychar = YYEMPTY # cause a token to be read
        # Initialize stack pointers
        # Waste one element of value and location stack
        # so that they stay on the same level as the state stack.
        # The wasted elements are never initialized.
        state_ptr = -1
        value_ptr = 0
        while 1:
            # Push a new state, which is found in yystate.
            # In all cases, when you get here, the value and location stacks
            # have just been pushed. So pushing a state here evens the stacks.
            state_ptr = state_ptr + 1
            state_stack[state_ptr] = yystate
            ##if debug
            ##endif
            # Do appropriate processing given the current state.
            # Read a lookahead token if we need one and don't already have one.
            # First try to decide what to do without reference to lookahead token.
            yyn = action_idx[yystate]
            if yyn == YYFLAG:
                # This state takes its default action without a lookahead.
                yyn = default_action[yystate]
                if yyn == 0:
                    self.report_error(yystate, lexer_pos, yylval)
                    return 1
                # Do a reduction. yyn is the number of a rule to reduce with.
                ##if debug
                ##endif
                # Pop the rule's right-hand side off both stacks.
                state_ptr = state_ptr - rhs_size[yyn]
                value_ptr = value_ptr - rhs_size[yyn]
                if action_routines[yyn]:
                    yyval = action_routines[yyn](self, value_stack, value_ptr)
                    value_ptr = value_ptr + 1
                    value_stack[value_ptr] = yyval
                else:
                    # No semantic action: the default is $$ = $1 (already
                    # in place at the new stack top).
                    value_ptr = value_ptr + 1
                ##if debug
                ##endif
                # Now "shift" the result of the reduction.
                # Determine what state that goes to, based on the state
                # we popped back to and the rule number reduced by.
                yyn = derives[yyn] - YYNTBASE
                yystate = goto_idx[yyn] + state_stack[state_ptr]
                if 0 <= yystate <= YYLAST and yycheck[yystate] == state_stack[state_ptr]:
                    yystate = yytable[yystate]
                else:
                    yystate = default_goto[yyn]
                continue
            # Not known => get a lookahead token if don't already have one.
            # yychar is either YYEMPTY, YYEOF or a valid token in external form
            if yychar == YYEMPTY:
                ##if debug
                ##endif
                ### Lexical analysis ###
                while lexer_last < lexer_end:
                    lexer_pos = lexer_last
                    try:
                        match = patterns[lexer_state].match(text[lexer_pos:])
                        # First named group that actually matched:
                        # (group_name, matched_text).  Py2 tuple-parameter
                        # lambda.
                        matched = filter(lambda (g,v): v is not None, match.groupdict().items())[0]
                    except AttributeError:
                        # comes here when match is none, it is an error anyway
                        self.error('No action found for "%s"' % text[lexer_pos:])
                    lexer_last = lexer_last + len(matched[1])
                    lexer_action = pattern_actions[matched[0]]
                    if lexer_action:
                        # (new_state_or_None, token_or_None)
                        lexer_state = lexer_action[0] or lexer_state
                        ##if debug
                        ##endif
                        if len(lexer_action) > 1:
                            yylval = text[lexer_pos:lexer_last]
                            # token None means "use the character's ordinal"
                            yychar = lexer_action[1] or ord(yylval)
                            ##if debug
                            ##endif
                            break
                        else:
                            # Just a state change, reprocess the text
                            lexer_last = lexer_pos
                            continue
                    # otherwise, throw away matched text (e.g. whitespace)
                else:
                    # Reached end of input
                    yychar = YYEOF
            # Convert token to internal form (in yychar1) for indexing tables with
            if yychar <= 0:
                # This means end-of-input.
                yychar1 = 0
                ##if debug
                ##endif
            else:
                yychar1 = YYTRANSLATE(yychar)
                ##if debug
                ##endif
            yyn = yyn + yychar1
            if yyn < 0 or yyn > YYLAST or yycheck[yyn] != yychar1:
                # comes here after end of input
                yyn = default_action[yystate]
                if yyn == 0:
                    self.report_error(yystate, lexer_pos, yylval)
                    return None
                # Do a reduction. yyn is the number of a rule to reduce with.
                ##if debug
                ##endif
                state_ptr = state_ptr - rhs_size[yyn]
                value_ptr = value_ptr - rhs_size[yyn]
                if action_routines[yyn]:
                    yyval = action_routines[yyn](self, value_stack, value_ptr)
                    value_ptr = value_ptr + 1
                    value_stack[value_ptr] = yyval
                else:
                    value_ptr = value_ptr + 1
                ##if debug
                ##endif
                # Now "shift" the result of the reduction.
                # Determine what state that goes to, based on the state
                # we popped back to and the rule number reduced by.
                yyn = derives[yyn] - YYNTBASE
                yystate = goto_idx[yyn] + state_stack[state_ptr]
                if 0 <= yystate <= YYLAST and yycheck[yystate] == state_stack[state_ptr]:
                    yystate = yytable[yystate]
                else:
                    yystate = default_goto[yyn]
                continue
            yyn = yytable[yyn]
            # yyn is what to do for this token type in this state.
            # Negative => reduce, -yyn is rule number.
            # Positive => shift, yyn is new state.
            # New state is final state => don't bother to shift
            # just return success.
            # 0, or max negative number => error.
            if YYFLAG < yyn < 0:
                yyn = -yyn
                # Do a reduction. yyn is the number of a rule to reduce with.
                ##if debug
                ##endif
                state_ptr = state_ptr - rhs_size[yyn]
                value_ptr = value_ptr - rhs_size[yyn]
                if action_routines[yyn]:
                    yyval = action_routines[yyn](self, value_stack, value_ptr)
                    value_ptr = value_ptr + 1
                    value_stack[value_ptr] = yyval
                else:
                    value_ptr = value_ptr + 1
                ##if debug
                ##endif
                # Now "shift" the result of the reduction.
                # Determine what state that goes to, based on the state
                # we popped back to and the rule number reduced by.
                yyn = derives[yyn] - YYNTBASE
                yystate = goto_idx[yyn] + state_stack[state_ptr]
                if 0 <= yystate <= YYLAST and yycheck[yystate] == state_stack[state_ptr]:
                    yystate = yytable[yystate]
                else:
                    yystate = default_goto[yyn]
                continue
            elif yyn == YYFINAL:
                # Accept: the parsed expression's value is on the stack top.
                return value_stack[value_ptr-1]
            elif yyn <= 0:
                # Now it is either 0 or YYFLAG
                self.report_error(yystate, lexer_pos, yylval)
                return None
            # Shift the lookahead token.
            ##if debug
            ##endif
            if yychar != YYEOF:
                yychar = YYEMPTY
            value_ptr = value_ptr + 1
            value_stack[value_ptr] = yylval
            yystate = yyn
            continue
        # should never get here
        return None

    def report_error(self, state, column, lval):
        """Build a "parse error ... expecting ..." message for `state` and
        raise it through self.error().
        """
        ruleno = action_idx[state]
        msg = "parse error at column %d, '%s'" % (column, lval)
        if YYFLAG < ruleno < YYLAST:
            # Start X at -ruleno to avoid negative indexes in yycheck
            start = ruleno < 0 and -ruleno or 0
            first = 1
            for x in range(start, len(token_names)):
                if yycheck[x + ruleno] == x:
                    if first:
                        msg = msg + ", expecting"
                        first = 0
                    else:
                        msg = msg + " or"
                    msg = msg + " '%s'" % token_names[x]
        self.error(msg)
        return

    def announce(self, format, *args):
        # Debug-trace helper.  NOTE(review): relies on a module-level
        # `import sys` that is not visible in this generated section --
        # confirm it exists at the top of the real file.
        sys.stderr.write(format % args)
        return

    def error(self, format, *args):
        # All errors surface as SyntaxError.
        raise SyntaxError(format % args)

    def print_reduce(self, rule):
        # Debug trace: show the rule being reduced, its source location,
        # and the symbols involved.
        sys.stderr.write("Reducing via rule %d (%s), " % (rule, rule_info[rule]))
        # print the symbols being reduced and their result.
        for token in rhs_tokens[rule]:
            sys.stderr.write("%s " % token_names[token])
        sys.stderr.write("-> %s\n" % token_names[derives[rule]])
        return

    def print_state_stack(self, stack, size):
        # Debug trace: dump the state stack up to and including `size`.
        sys.stderr.write("state stack now")
        for i in range(size+1):
            sys.stderr.write(" %d" % stack[i])
        sys.stderr.write("\n")
        return

# Module-level constructor alias (used as ConstExpr.new(...) by callers).
new = Parser

# modules required for action routines

# the action code for each rule
# Each routine receives (parser, value_stack, ptr); value_stack[ptr+k] is
# the semantic value of the k-th right-hand-side symbol ($k in bison
# terms).  The return value becomes the rule's semantic value ($$).

def logical_or_expr2(self, __stack, __ptr):
    """ from ConstExpr.bgen, line 42
    logical_or_expr: logical_or_expr OR logical_and_expr
    """
    # `not not` normalizes the truthy result to 0/1.
    __val = not not (__stack[__ptr+1] or __stack[__ptr+3])
    return __val

def logical_and_expr2(self, __stack, __ptr):
    """ from ConstExpr.bgen, line 57
    logical_and_expr: logical_and_expr AND bitwise_or_expr
    """
    __val = not not (__stack[__ptr+1] and __stack[__ptr+3])
    return __val

def bitwise_or_expr2(self, __stack, __ptr):
    """ from ConstExpr.bgen, line 72
    bitwise_or_expr: bitwise_or_expr '|' bitwise_xor_expr
    """
    __val = __stack[__ptr+1] | __stack[__ptr+3]
    return __val

def bitwise_xor_expr2(self, __stack, __ptr):
    """ from ConstExpr.bgen, line 87
    bitwise_xor_expr: bitwise_xor_expr '^' bitwise_and_expr
    """
    __val = __stack[__ptr+1] ^ __stack[__ptr+3]
    return __val

def bitwise_and_expr2(self, __stack, __ptr):
    """ from ConstExpr.bgen, line 102
    bitwise_and_expr: bitwise_and_expr '&' equality_expr
    """
    __val = __stack[__ptr+1] & __stack[__ptr+3]
    return __val

def equality_expr2(self, __stack, __ptr):
    """ from ConstExpr.bgen, line 117
    equality_expr: equality_expr EQ relational_expr
    """
    __val = __stack[__ptr+1] == __stack[__ptr+3]
    return __val

def equality_expr3(self, __stack, __ptr):
    """ from ConstExpr.bgen, line 125
    equality_expr: equality_expr NE relational_expr
    """
    __val = __stack[__ptr+1] != __stack[__ptr+3]
    return __val

def relational_expr2(self, __stack, __ptr):
    """ from ConstExpr.bgen, line 140
    relational_expr: relational_expr LT shift_expr
    """
    __val = __stack[__ptr+1] < __stack[__ptr+3]
    return __val

def relational_expr3(self, __stack, __ptr):
    """ from ConstExpr.bgen, line 148
    relational_expr: relational_expr LE shift_expr
    """
    __val = __stack[__ptr+1] <= __stack[__ptr+3]
    return __val

def relational_expr4(self, __stack, __ptr):
    """ from ConstExpr.bgen, line 156
    relational_expr: relational_expr GT shift_expr
    """
    __val = __stack[__ptr+1] > __stack[__ptr+3]
    return __val

def relational_expr5(self, __stack, __ptr):
    """ from ConstExpr.bgen, line 164
    relational_expr: relational_expr GE shift_expr
    """
    __val = __stack[__ptr+1] >= __stack[__ptr+3]
    return __val

def shift_expr2(self, __stack, __ptr):
    """ from ConstExpr.bgen, line 179
    shift_expr: shift_expr LSH additive_expr
    """
    __val = __stack[__ptr+1] << __stack[__ptr+3]
    return __val

def shift_expr3(self, __stack, __ptr):
    """ from ConstExpr.bgen, line 187
    shift_expr: shift_expr RSH additive_expr
    """
    __val = __stack[__ptr+1] >> __stack[__ptr+3]
    return __val

def additive_expr2(self, __stack, __ptr):
    """ from ConstExpr.bgen, line 202
    additive_expr: additive_expr '+' multiplicative_expr
    """
    __val = __stack[__ptr+1] + __stack[__ptr+3]
    return __val

def additive_expr3(self, __stack, __ptr):
    """ from ConstExpr.bgen, line 210
    additive_expr: additive_expr '-' multiplicative_expr
    """
    __val = __stack[__ptr+1] - __stack[__ptr+3]
    return __val

def multiplicative_expr2(self, __stack, __ptr):
    """ from ConstExpr.bgen, line 225
    multiplicative_expr: multiplicative_expr '*' unary_expr
    """
    __val = __stack[__ptr+1] * __stack[__ptr+3]
    return __val

def multiplicative_expr3(self, __stack, __ptr):
    """ from ConstExpr.bgen, line 233
    multiplicative_expr: multiplicative_expr '/' unary_expr
    """
    # Python 2 integer division (truncating for ints).
    __val = __stack[__ptr+1] / __stack[__ptr+3]
    return __val

def multiplicative_expr4(self, __stack, __ptr):
    """ from ConstExpr.bgen, line 241
    multiplicative_expr: multiplicative_expr '%' unary_expr
    """
    __val = __stack[__ptr+1] % __stack[__ptr+3]
    return __val

def unary_expr2(self, __stack, __ptr):
    """ from ConstExpr.bgen, line 256
    unary_expr: '-' primary_expr
    """
    __val = -__stack[__ptr+2]
    return __val

def unary_expr3(self, __stack, __ptr):
    """ from ConstExpr.bgen, line 263
    unary_expr: '+' primary_expr
    """
    __val = +__stack[__ptr+2]
    return __val

def unary_expr4(self, __stack, __ptr):
    """ from ConstExpr.bgen, line 270
    unary_expr: '!' primary_expr
    """
    __val = not __stack[__ptr+2]
    return __val

def unary_expr5(self, __stack, __ptr):
    """ from ConstExpr.bgen, line 277
    unary_expr: '~' primary_expr
    """
    __val = ~__stack[__ptr+2]
    return __val

def primary_expr1(self, __stack, __ptr):
    """ from ConstExpr.bgen, line 288
    primary_expr: IDENTIFIER
    """
    # Undefined macros evaluate to 0, C-preprocessor style.
    # NOTE(review): self.defines must be assigned by the caller.
    __val = self.defines.get(__stack[__ptr+1], 0)
    return __val

def primary_expr2(self, __stack, __ptr):
    """ from ConstExpr.bgen, line 294
    primary_expr: INTEGER
    """
    # eval() handles the optional L/l suffix; the lexer pattern restricts
    # INTEGER to \d+(L|l)? so this cannot evaluate arbitrary code, but it
    # is still eval on input text -- flagged for review.
    __val = long(eval(__stack[__ptr+1]))
    return __val

def primary_expr3(self, __stack, __ptr):
    """ from ConstExpr.bgen, line 300
    primary_expr: DEFINED '(' IDENTIFIER ')'
    """
    __val = self.defines.has_key(__stack[__ptr+3])
    return __val

def primary_expr4(self, __stack, __ptr):
    """ from ConstExpr.bgen, line 309
    primary_expr: DEFINED IDENTIFIER
    """
    __val = self.defines.has_key(__stack[__ptr+2])
    return __val

def primary_expr5(self, __stack, __ptr):
    """ from ConstExpr.bgen, line 316
    primary_expr: IDENTIFIER '(' arguments ')'
    """
    # Function-like macro invocations evaluate to 0.
    __val = 0
    return __val

def primary_expr6(self, __stack, __ptr):
    """ from ConstExpr.bgen, line 326
    primary_expr: '(' expr ')'
    """
    __val = __stack[__ptr+2]
    return __val

# Semantic action dispatch table, indexed by rule number.  None means the
# default action ($$ = $1) -- see the reduction code in Parser.parse.
action_routines = [None,
    None,                  #  1
    None,                  #  2
    None,                  #  3
    logical_or_expr2,      #  4
    None,                  #  5
    logical_and_expr2,     #  6
    None,                  #  7
    bitwise_or_expr2,      #  8
    None,                  #  9
    bitwise_xor_expr2,     # 10
    None,                  # 11
    bitwise_and_expr2,     # 12
    None,                  # 13
    equality_expr2,        # 14
    equality_expr3,        # 15
    None,                  # 16
    relational_expr2,      # 17
    relational_expr3,      # 18
    relational_expr4,      # 19
    relational_expr5,      # 20
    None,                  # 21
    shift_expr2,           # 22
    shift_expr3,           # 23
    None,                  # 24
    additive_expr2,        # 25
    additive_expr3,        # 26
    None,                  # 27
    multiplicative_expr2,  # 28
    multiplicative_expr3,  # 29
    multiplicative_expr4,  # 30
    None,                  # 31
    unary_expr2,           # 32
    unary_expr3,           # 33
    unary_expr4,           # 34
    unary_expr5,           # 35
    primary_expr1,         # 36
    primary_expr2,         # 37
    primary_expr3,         # 38
    primary_expr4,         # 39
    primary_expr5,         # 40
    primary_expr6,         # 41
    None,                  # 42
    None,                  # 43
    ]

# start condition definitions
for the lexer INITIAL = 1 # the expressions and information for each rule import re patterns = { INITIAL : re.compile('(?P&&)|(?P\\|\\|)|(?P==)|(?P!=)|(?P<<)|(?P<=)|(?P<)|(?P>>)|(?P>=)|(?P>)|(?P\\d+(L|l)?)|(?Pdefined)|(?P[a-zA-Z_][\\w_]*)|(?P\\s+)|(?P.)'), } pattern_actions = { 'p00' : (None, AND), 'p01' : (None, OR), 'p02' : (None, EQ), 'p03' : (None, NE), 'p04' : (None, LSH), 'p05' : (None, LE), 'p06' : (None, LT), 'p07' : (None, RSH), 'p08' : (None, GE), 'p09' : (None, GT), 'p10' : (None, INTEGER), 'p11' : (None, DEFINED), 'p12' : (None, IDENTIFIER), 'p13' : None, 'p14' : (None, None), } if __name__ == '__main__': import sys try: import readline except: pass try: import ConstExprc parser = ConstExprc.new(1) print 'Using C parser' except: import ConstExpr parser = ConstExpr.new(1) print 'Using Python parser' if len(sys.argv) > 1: result = parser.parse(sys.argv[1]) result.pprint() raise SystemExit() print 'Use -C to exit.' try: while 1: expr = raw_input('>>>') result = parser.parse(expr) result.pprint() except KeyboardInterrupt: raise SystemExit