"""
C code emission backend for BisonGen.

TableOutput writes the generated parser's tables as C arrays;
OutputActions writes the `case` arms of the reduce-action switch, using
one of three body generators (MappingBody, TupleBody, CodeBody).

NOTE(review): this is Python 2 code (`map(None, ...)` pairing idiom,
PyString/PyInt C API names in the emitted code) -- keep it on Python 2
unless the whole package is ported.
"""

from BisonGen.Grammar import Output

import re

# Patterns used by CodeBody.  Raw strings so the backslashes reach the
# regex engine unmangled.
whitespace = re.compile(r'\s*')
equals = re.compile(r'\s*=')            # NOTE(review): unused in this module; kept for importers
stack_ptr = re.compile(r'\$(\d+|\$)')   # matches $1, $2, ... and $$ in action code


def OutputTables(bison, outfile):
    """Write all parser tables for `bison` to the open file `outfile`."""
    TableOutput(bison).output(outfile)
    return


def OutputActions(parser, bison, outfile):
    """
    Write one C `case` arm per grammar rule that carries a 'c' action.

    The action body style is chosen from the parser options:
    'mapping' -> dict-building C, 'tuple' -> tuple-building C,
    otherwise the rule's literal C code (CodeBody).
    """
    if parser.options.get('mapping'):
        write_body = MappingBody
    elif parser.options.get('tuple'):
        write_body = TupleBody
    else:
        write_body = CodeBody
    write = outfile.write
    num = 0
    for rule in parser.grammar.rules:
        num = num + 1
        # Only write functions that have an action
        if 'c' in rule.action:
            write('case %d: {\n' % num)
            write(' /* from %s, line %d\n' % (rule.filename, rule.lineno))
            # Render the rule as "lhs: rhs1 rhs2 ..." for the C comment.
            grammar = ' '.join([rule.lhs + ':'] + list(rule.rhs))
            write(' * %s\n' % grammar)
            write(' */\n')
            write_body(write, rule)
            write(' break;\n')
            write('}\n')
    return


# Add C-specific writing routines to the grammar table writer
class TableOutput(Output.OutputBase):

    def render_table(self, write, name, table, render=repr):
        """Write `table` as a C array initializer, one rendered item per line."""
        write('%s[] = {\n' % name)
        for item in table:
            write(' %s,\n' % render(item))
        write('};\n')
        return

    def output_number_table(self, write, name, table):
        """
        Write `table` as a packed C array initializer, wrapping lines at
        roughly 70 columns.  Assumes `table` is a non-empty sequence of
        ints (all call sites in this module satisfy that).
        """
        write('%s[] = {' % name)
        width = len(name) + 4
        for item in table[:-1]:
            if width > 70:
                write('\n')
                width = 0
            num = '%d, ' % item
            write(num)
            width = width + len(num)
        # Wrap before the closing element too, if the line is full.
        if width > 70:
            write('\n')
        write('%d};\n' % table[-1])
        return

    def output_token_defines(self, write):
        """Emit #define lines for user-declared, non-literal token names."""
        # Save them for module constants later
        self.token_defines = {}
        write('\n/* token definitions */\n')
        # Py2 idiom: map(None, a, b) pairs the sequences like zip,
        # padding the shorter with None.
        named_tokens = map(None, self.tags, self.user_toknums)
        # Start of user specified tokens
        user_tokens = self.user_toknums[self.error_token_number]
        for (name, token) in named_tokens:
            # Skip predefined tokens and quoted literal tokens.
            if token > user_tokens and name[0] != '"':
                write('#define %s %d\n' % (name, token))
                self.token_defines[name] = token
        return

    def output_token_translations(self, write):
        """Emit the lexer-token -> internal-token translation vector and macro."""
        write('\n/* vector mapping lexer token numbers into internal token numbers */\n')
        self.output_number_table(write, 'static const int token_translations',
                                 self.token_translations)
        write('\n#define YYTRANSLATE(x) ')
        write('((unsigned)(x) <= %d ? token_translations[x] : %d)\n'
              % (self.max_user_token, self.nsyms))
        return

    def output_gram(self, write):
        """Emit the rhs_tokens vector: items of all rules, 0-terminated."""
        tokens = [0]
        # ritem ends with a rule followed by 0
        for item in self.ritem[:-1]:
            if item > 0:
                tokens.append(item)
            else:
                # The goto rule; skip and start a new list
                tokens.append(0)
        write('\n/* vector of items of all rules. */\n')
        self.output_number_table(write, 'static const int rhs_tokens', tokens)
        return

    def output_rule_data(self, write):
        """Emit per-rule info tables: source location, names, derives, sizes."""
        def repr_rule_info(item):
            # item is a (filename, lineno) pair from self.rline
            return '"%s: line %d"' % item

        write('\n/* vector of line numbers and filename of all rules */\n')
        self.render_table(write, 'static const char* const rule_info',
                          self.rline, repr_rule_info)

        def repr_token_names(item):
            # Strip surrounding single quotes from character tokens.
            item = str(item)
            if item[0] == item[-1] == "'":
                item = item[1:-1]
            return '"%s"' % item

        write('\n/* vector of string-names indexed by token number */\n')
        self.render_table(write, 'static const char* const token_names',
                          self.tags, repr_token_names)

        write('\n/* symbol number of symbol that rule derives. */\n')
        self.output_number_table(write, 'static const int derives', self.rlhs)

        # Calculate the length of the rhs for each rule.  rrhs[i] is the
        # start index of rule i's items; consecutive starts differ by
        # (rhs length + 1) because of the trailing rule marker.
        rhs_size = [0]
        for i in range(1, self.nrules):
            rhs_size.append(self.rrhs[i+1] - self.rrhs[i] - 1)
        rhs_size.append(self.nitems - self.rrhs[self.nrules] - 1)
        write('\n/* number of symbols composing right hand side of rule. */\n')
        self.output_number_table(write, 'static const int rhs_size', rhs_size)
        return

    def token_actions(self, write):
        """
        figure out the actions for the specified state, indexed by
        lookahead token type.
        """
        actions = map(self.action_row, range(self.nstates))
        write('\n/* default rule to reduce with in state. 0 means the default is an error.')
        write('\n indexed by state number */\n')
        self.output_number_table(write, 'static const int default_action', actions)
        return

    def goto_actions(self, write):
        """
        figure out what to do after reducing with each rule, depending
        on the saved state from before the beginning of parsing the
        data that matched this rule.
        """
        gotos = map(self.default_goto, range(self.ntokens, self.nsyms))
        write('\n/* default state to go to after a reduction of a rule.')
        write('\n indexed by variable number (lhs token) */\n')
        self.output_number_table(write, 'static const int default_goto', gotos)
        return

    # the following functions output yytable, yycheck and the
    # vectors whose elements index the portion starts

    def output_base(self, write):
        """Emit action_idx and goto_idx (indexes into yytable)."""
        write('\n/* index in yytable of the portion describing state (indexed by state number)')
        write('\n If the value in yytable is positive, we shift the token and go to that state.')
        write('\n If the value is negative, it is minus a rule number to reduce by.')
        write('\n If the value is zero, the default action from yydefact[s] is used. */\n')
        self.output_number_table(write, 'static const int action_idx',
                                 self.base[:self.nstates])
        write('\n/* The index in yytable of the portion describing what to do after reducing a rule.')
        write('\n The value from yytable is the state to go to. */\n')
        self.output_number_table(write, 'static const int goto_idx',
                                 self.base[self.nstates:])
        # Free the table now that it has been written out.
        del self.base
        return

    def output_table(self, write):
        """Emit yytable, truncated to the highest used entry."""
        write('\n/* A vector filled with portions for different uses.')
        write('\n (using action_idx and goto_idx) */\n')
        self.output_number_table(write, 'static const int yytable',
                                 self.table[:self.high+1])
        # Free the table now that it has been written out.
        del self.table
        return

    def output_check(self, write):
        """Emit yycheck, the bounds-checking companion of yytable."""
        write('\n/* a vector indexed in parallel with yytable.')
        write('\n It indicates the bounds of the portion you are trying to examine. */\n')
        self.output_number_table(write, 'static const int yycheck',
                                 self.check[:self.high+1])
        # Free the table now that it has been written out.
        del self.check
        return

    def output_defines(self, write):
        """Emit the summary #define constants used by the parser skeleton."""
        write("\n#define YYLAST %d" % self.high)
        write("\n#define YYFINAL %d" % self.final_state)
        write("\n#define YYFLAG %d" % Output.MINSHORT)
        write("\n#define YYNTBASE %d" % self.ntokens)
        write("\n")
        return


# -- action routines ---------------------------------------------------

def MappingBody(write, rule):
    """Emit C that builds the semantic value as a dict keyed by rhs position."""
    write(' yyval = PyDict_New();\n')
    write(' PyDict_SetItemString(yyval, "TYPE", PyString_FromString("%s"));\n'
          % rule.lhs.upper())
    for n in range(len(rule.rhs)):
        write(' PyDict_SetItem(yyval, ')
        write('PyInt_FromLong((long)%d), value_ptr[%d]);\n' % (n, n + 1))
    return


def TupleBody(write, rule):
    """Emit C that builds the semantic value as a (lhs-name, rhs...) tuple."""
    write(' yyval = PyTuple_New(%d);\n' % (len(rule.rhs) + 1))
    write(' PyTuple_SET_ITEM(yyval, 0, PyString_FromString("%s"));\n'
          % rule.lhs.upper())
    for n in range(1, len(rule.rhs)+1):
        write(' PyTuple_SET_ITEM(yyval, %d, value_ptr[%d]);\n' % (n, n))
    return


def CodeBody(write, rule):
    """
    Emit the user's literal C action for `rule`, with $n / $$ references
    rewritten to value_ptr[n] / yyval, dedented by the common leading
    whitespace, followed by verbose-mode tracing and Py_DECREF cleanup.
    Assumes the 'c' action contains at least one non-blank line.
    """
    lines = rule.action.get('c', '').split('\n')
    # Remove leading and trailing lines that are only whitespace
    while not lines[0].strip():
        lines = lines[1:]
    while not lines[-1].strip():
        lines = lines[:-1]
    # find length of minimum amount of preceding whitespace
    space = len(lines[0])
    for line in lines:
        match = whitespace.match(line)
        if match and match.end() < len(line):
            if match.end() < space:
                space = match.end()

    def replacer(match):
        # $$ -> yyval; $n -> value_ptr[n]
        if match.group(1) == '$':
            return 'yyval'
        return 'value_ptr[%d]' % int(match.group(1))

    for line in lines:
        fini = stack_ptr.sub(replacer, line)
        write(' %s\n' % fini.rstrip()[space:])

    # Verbose-mode tracing: print the reduced rule and its values.
    write(' if (self->verbose) {\n')
    write(' fprintf(stderr, "--%s(");\n' % rule.lhs)
    for pos in range(1, len(rule.rhs)+1):
        if pos > 1:
            write(' fprintf(stderr, ", ");\n')
        write(' PyObject_Print(value_ptr[%d], stderr, Py_PRINT_RAW);\n' % pos)
    write(' fprintf(stderr, ")\\n");\n')
    write(' }\n')
    # Release the references held for the rhs values.
    for pos in range(1, len(rule.rhs)+1):
        write(' Py_DECREF(value_ptr[%d]);\n' % pos)
    return