diff --git a/.gitignore b/.gitignore
index 6383c12..10ebcff 100644
--- a/.gitignore
+++ b/.gitignore
@@ -14,6 +14,7 @@ vocab.*
 .project
 .classpath
 bin/ccg2xml.py
+bin/xml2ccg.py
 bin/lex.py
 bin/yacc.py
 bin/ccg_editor.py
diff --git a/bin/ccg-editor.py b/bin/ccg-editor.py
deleted file mode 100644
index 07f0216..0000000
--- a/bin/ccg-editor.py
+++ /dev/null
@@ -1,923 +0,0 @@
-#!/usr/bin/python
-
-# Author: Ben Wing
-# Date: April 2006
-
-#############################################################################
-#                                                                           #
-#                              ccg-editor.ply                               #
-#                                                                           #
-# Edit a CCG-format file, graphically.  Will have a mode for displaying    #
-# CCG files in a friendly fashion and allowing for editing of parts or     #
-# all of the file.  Will also have a mode for testing a CCG grammar, and   #
-# allow for compilation and error-finding under control of the editor.     #
-#                                                                           #
-#############################################################################
-
-# This code is based on PyEdit version 1.1, from Oreilly's Programming
-# Python, 2nd Edition, 2001, by Mark Lutz.
-
-from Tkinter import *           # base widgets, constants
-from tkFileDialog import *      # standard dialogs
-from tkMessageBox import *
-from tkSimpleDialog import *
-from tkColorChooser import askcolor
-from ScrolledText import ScrolledText
-from string import split, atoi
-import sys, os, string
-import ccg2xml
-#Added by Sudipta
-import Tree
-import re
-
-START = '1.0'               # index of first char: row=1,col=0
-SEL_FIRST = SEL + '.first'  # map sel tag to index
-SEL_LAST = SEL + '.last'    # same as 'sel.last'
-
-FontScale = 0                  # use bigger font on linux
-if sys.platform[:3] != 'win':  # and other non-windows boxes
-    FontScale = 3
-
-# Initial top-level window; it's not clear we need this.
-# FIXME: It sucks that we have to call Tk() to get the first top-level window
-# but Toplevel() for all others.  We should be able to call Tk() initially,
-# and then Toplevel() to create all top-level windows, including the first.
-root = None
-
-# List of all open CFile objects
-openfiles = {}
-
-class CTab(Frame):
-    # Initialize this tab.  Usually called from a subclass.  PARENT is
-    # the parent widget, CFILE the CFile object associated with the
-    # top-level window, and TABNAME is the name of this tab (that tab
-    # will be removed from the toolbar).
-    def __init__(self, parent, cfile, tabname):
-        Frame.__init__(self, parent)
-        self.parent = parent
-        self.cfile = cfile
-        self.toolbar = None
-        self.checkbar = None
-        self.menubar = [
-            ('File', 0,
-             [('Open...', 0, self.cfile.onOpen),
-              ('New', 0, self.cfile.onNew),
-              ('Save', 0, self.onSave),
-              ('Save As...', 5, self.onSaveAs),
-              ('Close', 0, self.cfile.onClose),
-              'separator',
-              ('Quit...', 0, self.cfile.onQuit)]
-            ),
-            ('Tools', 0,
-             [('Font List', 0, self.cfile.onFontList),
-              ('Pick Bg...', 4, self.cfile.onPickBg),
-              ('Pick Fg...', 0, self.cfile.onPickFg),
-              ('Color List', 0, self.cfile.onColorList),
-              'separator',
-              ('Info...', 0, self.cfile.onInfo),
-              ]
-            )]
-        self.toolbar = [
-            #('Display', self.cfile.onDisplay, {'side': LEFT}),
-            ('Edit', self.cfile.onEdit, {'side': LEFT}),
-            ('Lexicon', self.cfile.onLexicon, {'side': LEFT}),
-            ('Testbed', self.cfile.onTestbed, {'side': LEFT}),
-            ('Features', self.cfile.onFeatures, {'side': LEFT}),
-            ('Rules', self.cfile.onRules, {'side': LEFT}),
-            ('Quit', self.cfile.onQuit, {'side': RIGHT}),
-            ('Help', self.cfile.help, {'side': RIGHT}),
-            ('Save', self.onSave, {'side': RIGHT}),
-            ]
-#        self.remove_toolbar_button(tabname)
-
-    # Add MENU (a tuple corresponding to a single top-level menu item)
-    # after the item with the name AFTER.
-    def add_menu(self, after, menu):
-        newmenu = []
-        for x in self.menubar:
-            newmenu += [x]
-            if x[0] == after:
-                newmenu += [menu]
-        self.menubar = newmenu
-
-    # Remove the toolbar button named NAME.
-    def remove_toolbar_button(self, name):
-        newtoolbar = []
-        for x in self.toolbar:
-            if x[0] != name:
-                newtoolbar += [x]
-        self.toolbar = newtoolbar
-
-    def reinit(self):
-        pass
-
-    #####################
-    # File menu commands
-    #####################
-
-    def onSave(self):
-        self.onSaveAs(self.cfile.currfile)  # may be None
-
-    def onSaveAs(self, forcefile=None):
-        file = forcefile or self.cfile.my_asksaveasfilename()
-        if file:
-            text = self.cfile.getAllText()
-            try:
-                open(file, 'w').write(text)
-            except:
-                showerror('CCG Editor', 'Could not write file ' + file)
-            else:
-                self.cfile.setFileName(file)  # may be newly created
-                self.cfile.edit_modified(NO)
-
-
-class CEdit(CTab):
-    def __init__(self, parent, cfile):
-        CTab.__init__(self, parent, cfile, 'Edit')
-
-        vbar = Scrollbar(self)
-        hbar = Scrollbar(self, orient='horizontal')
-        text = Text(self, padx=5, wrap='none', undo=YES)
-
-        vbar.pack(side=RIGHT, fill=Y)
-        hbar.pack(side=BOTTOM, fill=X)              # pack text last
-        text.pack(side=TOP, fill=BOTH, expand=YES)  # else sbars clipped
-
-        text.config(yscrollcommand=vbar.set)  # call vbar.set on text move
-        text.config(xscrollcommand=hbar.set)
-        vbar.config(command=text.yview)  # call text.yview on scroll move
-        hbar.config(command=text.xview)  # or hbar['command']=text.xview
-
-        text.config(font=self.cfile.fonts[0],
-                    bg=self.cfile.colors[0]['bg'], fg=self.cfile.colors[0]['fg'])
-        self.text = text
-
-        self.add_menu('File',
-            ('Edit', 0,
-             [('Cut', 0, self.onCut),
-              ('Copy', 1, self.onCopy),
-              ('Paste', 0, self.onPaste),
-              'separator',
-              ('Delete', 0, self.onDelete),
-              ('Select All', 0, self.onSelectAll)]
-            ))
-        self.add_menu('Edit',
-            ('Search', 0,
-             [('Goto...', 0, self.cfile.onGoto),
-              ('Find...', 0, self.cfile.onFind),
-              ('Refind', 0, self.cfile.onRefind),
-              ('Change...', 0, self.onChange)]
-            ))
-
-    def reinit(self):
-        self.text.focus()
-
-    #####################
-    # Edit menu commands
-    #####################
-
-    def onCopy(self):                      # get text selected by mouse,etc
-        if not self.text.tag_ranges(SEL):  # save in cross-app clipboard
-            showerror('CCG Editor', 'No text selected')
-        else:
-            text = self.text.get(SEL_FIRST, SEL_LAST)
-            self.clipboard_clear()
-            self.clipboard_append(text)
-
-    def onDelete(self):  # delete selected text, no save
-        if not self.text.tag_ranges(SEL):
-            showerror('CCG Editor', 'No text selected')
-        else:
-            self.text.delete(SEL_FIRST, SEL_LAST)
-
-    def onCut(self):
-        if not self.text.tag_ranges(SEL):
-            showerror('CCG Editor', 'No text selected')
-        else:
-            self.onCopy()    # save and delete selected text
-            self.onDelete()
-
-    def onPaste(self):
-        try:
-            text = self.selection_get(selection='CLIPBOARD')
-        except TclError:
-            showerror('CCG Editor', 'Nothing to paste')
-            return
-        self.text.insert(INSERT, text)  # add at current insert cursor
-        self.text.tag_remove(SEL, '1.0', END)
-        self.text.tag_add(SEL, INSERT+'-%dc' % len(text), INSERT)
-        self.text.see(INSERT)           # select it, so it can be cut
-
-    def onSelectAll(self):
-        self.text.tag_add(SEL, '1.0', END+'-1c')  # select entire text
-        self.text.mark_set(INSERT, '1.0')         # move insert point to top
-        self.text.see(INSERT)                     # scroll to top
-
-    #######################
-    # Search menu commands
-    #######################
-
-    def onChange(self):
-        new = Toplevel(self)
-        Label(new, text='Find text:').grid(row=0, column=0)
-        Label(new, text='Change to:').grid(row=1, column=0)
-        self.change1 = Entry(new)
-        self.change2 = Entry(new)
-        self.change1.grid(row=0, column=1, sticky=EW)
-        self.change2.grid(row=1, column=1, sticky=EW)
-        Button(new, text='Find',
-               command=self.onDoFind).grid(row=0, column=2, sticky=EW)
-        Button(new, text='Apply',
-               command=self.onDoChange).grid(row=1, column=2, sticky=EW)
-        new.columnconfigure(1, weight=1)  # expandable entrys
-
-    def onDoFind(self):
-        self.onFind(self.change1.get())  # Find in change box
-
-    def onDoChange(self):
-        if self.text.tag_ranges(SEL):                     # must find first
-            self.text.delete(SEL_FIRST, SEL_LAST)         # Apply in change
-            self.text.insert(INSERT, self.change2.get())  # deletes if empty
-            self.text.see(INSERT)
-            self.onFind(self.change1.get())               # goto next appear
-            self.text.update()                            # force refresh
-
-    ####################################
-    # Others, useful outside this class
-    ####################################
-
-    def isEmpty(self):
-        return not self.getAllText()
-
-    def getAllText(self):
-        return self.text.get('1.0', END+'-1c')  # extract text as a string
-
-    def setAllText(self, text):
-        self.text.delete('1.0', END)       # store text string in widget
-        self.text.insert(END, text)        # or '1.0'
-        self.text.mark_set(INSERT, '1.0')  # move insert point to top
-        self.text.see(INSERT)              # scroll to top, insert set
-        self.cfile.edit_modified(NO)
-
-    def clearAllText(self):
-        self.text.delete('1.0', END)  # clear text in widget
-
-# class CDisplay(CTab):
-#     def __init__(self, parent, cfile):
-#         CTab.__init__(self, parent, cfile, 'Display')
-
-#         # Use built-in text-with-scrollbar widget
-#         text = ScrolledText(self)
-#         text.config(font=self.cfile.fonts[0],
-#                     bg=self.cfile.colors[0]['bg'], fg=self.cfile.colors[0]['fg'])
-#         #text.config(font=('courier', 10, 'normal'))  # use fixed-width font
-#         text.pack(side=TOP, fill=BOTH, expand=YES)
-
-#         text.config(font=self.cfile.fonts[0],
-#                     bg=self.cfile.colors[0]['bg'], fg=self.cfile.colors[0]['fg'])
-#         self.text = text
-#
-#         self.add_menu('Edit',
-#             ('Search', 0,
-#              [('Goto...', 0, self.cfile.onGoto),
-#               ('Find...', 0, self.cfile.onFind),
-#               ('Refind', 0, self.cfile.onRefind),
-#             ))
-#
-#     def setAllText(self, text):
-#         self.text.config(state=NORMAL)
-#         self.text.delete('1.0', END)       # store text string in widget
-#         self.text.insert(END, text)        # or '1.0'
-#         self.text.mark_set(INSERT, '1.0')  # move insert point to top
-#         self.text.see(INSERT)              # scroll to top, insert set
-#         self.text.config(state=DISABLED)
-
-#     def reinit(self):
-#         self.setAllText(self.cfile.getAllText())
-#         self.text.focus()
-
-class CLexicon(CTab):
-    class lexicon_vars(object):
-        def __init__(self):
-            self.show_feat_id = IntVar()
-            self.show_feat_id.set(1)
-            self.show_feat_struct = IntVar()
-            self.show_feat_struct.set(1)
-            self.show_full_features = IntVar()
-            self.show_full_features.set(0)
-            self.show_semantics = IntVar()
-            self.show_semantics.set(1)
-
-    def __init__(self, parent, cfile):
-        CTab.__init__(self, parent, cfile, 'Lexicon')
-        self.child = None
-
-        self.vars = self.lexicon_vars()
-        # FIXME? It's a bit awkward that ccg.ply has references to the
-        # variables below scattered throughout it.  But I'm not sure what
-        # a better solution would be.
-        self.checkbar = [
-            ("Show feature ID's", self.vars.show_feat_id),
-            ("Show features", self.vars.show_feat_struct),
-            ('Full-form features', self.vars.show_full_features),
-            ('Show semantics', self.vars.show_semantics),
-            ]
-
-    # Called when we switch to this mode using the toolbar at top.
-    def reinit(self):
-        self.redraw()
-
-    # Called when a change is made to a checkbox setting.
-    # FIXME: There may be a smarter way to do this.
-    def redraw(self):
-        self.cfile.compile_if_needed()
-        if self.child:
-            self.child.pack_forget()
-        self.child = Frame(self, bd=2, relief=SUNKEN, background='white')
-        self.child.pack(expand=YES, fill=BOTH)
-        ccg2xml.draw_parse(self.cfile.curparse.parse, self.child, self.vars)
-
-class CRules(CTab):
-    def __init__(self, parent, cfile):
-        CTab.__init__(self, parent, cfile, 'Rules')
-
-class CFeatures(CTab):
-    def __init__(self, parent, cfile):
-        CTab.__init__(self, parent, cfile, 'Features')
-        self.child=None
-        self.checkbar=None
-
-    # Called when we switch to this mode using the toolbar at top.
-    def reinit(self):
-        if self.child:
-            self.child.pack_forget()
-
-        self.child = Frame(self, background='white')
-        self.child.pack(expand=YES, fill=BOTH)
-        butframe = Frame(self.child, cursor='hand2',
-                         relief=SUNKEN, bd=2)
-        butframe.pack(fill=X)
-        but1 = Button(butframe, text='Expand All', command=self.expand_all)
-        but1.pack(side=LEFT)
-        but2 = Button(butframe, text='Contract All', command=self.contract_all)
-        but2.pack(side=LEFT)
-        featframe = Frame(self.child, bd=2, relief=SUNKEN,
-                          background='white')
-        featframe.pack(expand=YES, fill=BOTH)
-        self.cfile.compile_if_needed()
-
-        # Build the tree
-        self.tree={}
-        self.root_name = re.sub(r'^(.*)\.(.*)$', r'\1', self.cfile.file)
-        self.tree[self.root_name]=[]
-        for feat in self.cfile.curparse.feature_to_values:
-            self.tree[self.root_name] += [str(feat)]
-        for feat in self.cfile.curparse.feature_to_values:
-            self.tree[feat] = []
-            # print str(self.cfile.curparse.feature_to_values[feat])+':'  --> CASE, SEM-NUM
-            for x in self.cfile.curparse.feature_to_values[feat]:
-                # print str(x)  --> CCGFeatval(nom, parents=[], licensing=[])
-                if x.name not in self.tree :
-                    self.tree[x.name] = []
-            for x in self.cfile.curparse.feature_to_values[feat]:
-                if x.parents:
-                    par = x.parents[0]
-                    self.tree[par.name] += [x.name]
-                else:
-                    self.tree[feat] += [x.name]
-        # Define the images for opened and closed categories
-        shut_icon=PhotoImage(data='R0lGODlhCQAQAJH/AMDAwAAAAGnD/wAAACH5BAEAAAAALAAA'
-                                  'AAAJABAAQAIdhI8hu2EqXIroyQrb\nyRf0VG0UxnSZ5jFjulrhaxQ'
-                                  'AO6olVwAAOw==')
-        open_icon=PhotoImage(data='R0lGODlhEAAJAJH/AMDAwAAAAGnD/wAAACH5BAEAAAAALAAA'
-                                  'AAAQAAkAQAIahI+pyyEPg3KwPrko\nTqH7/yGUJWxcZTapUQAAO8b'
-                                  'yUgAAOw==')
-
-        # Create the tree
-        self.t=Tree.Tree(master=featframe,
-                         root_id='',
-                         root_label=self.root_name,
-                         collapsed_icon = shut_icon,
-                         expanded_icon = open_icon,
-                         get_contents_callback = self.get_treedata,
-                         line_flag=False)
-
-        self.t.grid(row=0, column=0, sticky = 'nsew')
-
-        featframe.grid_rowconfigure(0, weight=1)
-        featframe.grid_columnconfigure(0, weight=1)
-
-        sb=Scrollbar(featframe)
-        sb.grid(row=0, column=1, sticky='ns')
-        self.t.configure(yscrollcommand=sb.set)
-        sb.configure(command=self.t.yview)
-
-        sb=Scrollbar(featframe, orient=HORIZONTAL)
-        sb.grid(row=1, column=0, sticky='ew')
-        self.t.configure(xscrollcommand=sb.set)
-        sb.configure(command=self.t.xview)
-
-        # Expand the whole tree out
-        self.expand_tree(self.t.root)
-
-    # Returns the nodes rooted at the node passed and adds them to the tree
-    def get_treedata(self,node):
-        lbl = str(node.get_label())
-        children = self.tree[lbl]
-        for x in children:
-            if self.tree[x]:
-                expands=1
-            else:
-                expands=0
-            self.t.add_node(name=x,flag=expands)
-
-    # Expand the tree rooted at node recursively
-    def expand_tree(self, node):
-        node.expand()
-        for child in node.children():
-            if child.expandable():
-                self.expand_tree(child)
-
-    def expand_all(self):
-        self.expand_tree(self.t.root)
-
-    def contract_all(self):
-        self.t.root.collapse()
-
-
-class CTestbed(CTab):
-    def __init__(self, parent, cfile):
-        CTab.__init__(self, parent, cfile, 'Testbed')
-        self.child = None
-
-    def makelab(self, text, row, col, **props):
-        lab = Label(self.child, text=text, background='white', **props)
-        # Make the label grow to fill all space allocated for the column
-        lab.grid(row=row, column=col, sticky=W+E)
-
-    # Called when we switch to this mode using the toolbar at top.
-    def reinit(self):
-        if self.child:
-            self.child.pack_forget()
-        self.child = Frame(self, bd=2, relief=SUNKEN, background='white')
-        self.child.pack(expand=YES, fill=BOTH)
-        self.cfile.compile_if_needed()
-        #self.makelab("Failure?", 0, 0, bd=1, relief=SUNKEN)
-        self.makelab("Sentence", 0, 0, bd=1, relief=SUNKEN)
-        self.makelab("Num Parses", 0, 1, bd=1, relief=SUNKEN)
-        # Make the column containing the sentences grow to include all
-        # extra space
-        self.child.columnconfigure(0, weight=1)
-        for i in xrange(len(self.cfile.curparse.testbed_statements)):
-            x = self.cfile.curparse.testbed_statements[i]
-            assert x[0] == 'item'
-            x = x[1]
-            # Left-justify the text
-            numparse = ccg2xml.getprop('numOfParses', x)
-            string = ccg2xml.getprop('string', x)
-            self.makelab('%s%s' % (numparse == 0 and '*' or '', string),
-                         i+1, 0, anchor=W)
-            #known = ccg2xml.getoptprop('known', x)
-            #self.makelab(known and '*' or '', i+1, 0)
-            self.makelab('%s' % numparse, i+1, 1)
-
-# Object corresponding to a single top-level window editing a single file.
-# Creates the top-level window and populates the widgets below it.
-class CFile(object):
-    #### NOTE NOTE NOTE!  Variables declared like this, in the class itself,
-    #### are class variables (not instance variables) until they are
-    #### assigned to.  If you want pure instance variables, you need to
-    #### initialize them inside of __init__().
-
-    # Hash table describing modes and the associated class
-    modelist = {'Edit':CEdit, 'Lexicon':CLexicon, 'Features':CFeatures,
-                'Testbed':CTestbed, 'Rules':CRules}
-
-    startfiledir = '.'
-    ftypes = [('All files', '*'),       # for file open dialog
-              ('Text files', '.txt'),   # customize in subclass
-              ('Python files', '.py')]  # or set in each instance
-
-    colors = [{'fg':'black', 'bg':'white'},   # color pick list
-              {'fg':'yellow', 'bg':'black'},  # first item is default
-              {'fg':'white', 'bg':'blue'},    # tailor me as desired
-              {'fg':'black', 'bg':'beige'},   # or do PickBg/Fg chooser
-              {'fg':'yellow', 'bg':'purple'},
-              {'fg':'black', 'bg':'brown'},
-              {'fg':'lightgreen', 'bg':'darkgreen'},
-              {'fg':'darkblue', 'bg':'orange'},
-              {'fg':'orange', 'bg':'darkblue'}]
-
-    fonts = [('courier', 9+FontScale, 'normal'),   # platform-neutral fonts
-             ('courier', 12+FontScale, 'normal'),  # (family, size, style)
-             ('courier', 10+FontScale, 'bold'),    # or popup a listbox
-             ('courier', 10+FontScale, 'italic'),  # make bigger on linux
-             ('times', 10+FontScale, 'normal'),
-             ('helvetica', 10+FontScale, 'normal'),
-             ('ariel', 10+FontScale, 'normal'),
-             ('system', 10+FontScale, 'normal'),
-             ('courier', 20+FontScale, 'normal')]
-
-    def __init__(self, file=None):
-        self.file = file
-
-        self.openDialog = None
-        self.saveDialog = None
-        self.lastfind = None
-        self.current_parse = None
-        self.text_when_last_compiled = None
-        self.mode = None
-
-        # First top-level window is Tk(); rest are Toplevel()
-        global root
-        if not root:
-            root = Tk()
-            self.top = root
-        else:
-            self.top = Toplevel(root)
-
-        ccg2xml.late_init_graphics()
-        openfiles[self] = True
-        self.top.protocol('WM_DELETE_WINDOW', self.onClose)
-
-        # We create an outer frame to hold the toolbar and the main widget.
-        # Create all the different kinds of main widget.
-        # FIXME: Maybe outer isn't necessary?
-        self.outer = Frame(self.top)
-        self.outer.pack(expand=YES, fill=BOTH)  # make frame stretchable
-        self.modes = {}
-        for mode in self.modelist:
-            self.modes[mode] = self.modelist[mode](self.outer, self)
-        self.main = None
-        self.toolbar_widget = None
-        self.checkbar_widget = None
-        self.switch_to('Edit')
-        self.setFileName(None)
-        if file:
-            self.onFirstOpen(file)
-
-    def switch_to(self, mode):
-        # Switch to a different mode (display, edit, test).  Remove the
-        # existing main and toolbar widgets, if existing.  Redo the menubar
-        # and toolbar widgets according to the new mode and then display
-        # the new widgets.
-        #
-        # FIXME: We should probably create the menubar and toolbar widgets
-        # only once, and remember them.
-        if self.mode != mode:
-            if self.main:
-                self.main.pack_forget()
-            if self.toolbar_widget:
-                self.toolbar_widget.pack_forget()
-            if self.checkbar_widget:
-                self.checkbar_widget.pack_forget()
-            self.mode = mode
-            self.main = self.modes[mode]
-            self.makeMenubar()
-            self.makeToolbar(mode)
-            self.makeCheckbar()
-            self.main.reinit()
-            # Pack the main widget after the toolbar, so it goes below it.
-            self.main.pack(side=TOP, expand=YES, fill=BOTH)
-
-    # Create the menubar; assumes that self.menubar has been set to the
-    # appropriate menubar description.  Note that the menubar has to be a
-    # child of the top-level window itself rather than any child of it, so
-    # that it can be correctly displayed at the top of the window -- or
-    # possibly in its decoration (Windows) or at top of screen (Mac).
-    #
-    # From PP2E guimaker.py.
-    def makeMenubar(self):
-        menubar = Menu(self.top)
-        self.top.config(menu=menubar)
-
-        for (name, key, items) in self.main.menubar:
-            pulldown = Menu(menubar)
-            self.addMenuItems(pulldown, items)
-            menubar.add_cascade(label=name, underline=key, menu=pulldown)
-
-        if sys.platform[:3] == 'win':
-            menubar.add_command(label='Help', command=self.help)
-        else:
-            pulldown = Menu(menubar)  # linux needs real pulldown
-            pulldown.add_command(label='About', command=self.help)
-            menubar.add_cascade(label='Help', menu=pulldown)
-
-    # Add items to a menu or submenu.  From PP2E guimaker.py.
-    def addMenuItems(self, menu, items):
-        for item in items:                 # scan nested items list
-            if item == 'separator':        # string: add separator
-                menu.add_separator({})
-            elif type(item) is list:       # list: disabled item list
-                for num in item:
-                    menu.entryconfig(num, state=DISABLED)
-            elif type(item[2]) is not list:
-                menu.add_command(label = item[0],      # command:
-                                 underline = item[1],  # add command
-                                 command = item[2])    # cmd=callable
-            else:
-                pullover = Menu(menu)
-                self.addMenuItems(pullover, item[2])   # sublist:
-                menu.add_cascade(label = item[0],      # make submenu
-                                 underline = item[1],  # add cascade
-                                 menu = pullover)
-
-    def makeToolbar(self, selected):
-        """
-        make toolbar (of buttons) at top, if any
-        expand=no, fill=x so same width on resize
-        """
-        if self.main.toolbar:
-            self.toolbar_widget = Frame(self.outer, cursor='hand2',
-                                        relief=SUNKEN, bd=2)
-            self.toolbar_widget.pack(side=TOP, fill=X)
-            for (name, action, where) in self.main.toolbar:
-                but = Button(self.toolbar_widget, text=name,
-                             command=action)
-                if name == selected:
-                    but.config(relief=SUNKEN)
-                but.pack(where)
-
-    def makeCheckbar(self):
-        """
-        make check-button bar at top, if any
-        expand=no, fill=x so same width on resize
-        """
-        if self.main.checkbar:
-            self.checkbar_widget = Frame(self.outer, cursor='hand2',
-                                         relief=SUNKEN, bd=2)
-            self.checkbar_widget.pack(side=TOP, fill=X)
-            for (name, var) in self.main.checkbar:
-                Checkbutton(self.checkbar_widget, text=name,
-                            variable=var,
-                            command=self.main.redraw).pack(side=LEFT)
-
-    def getAllText(self):
-        return self.modes['Edit'].getAllText()
-
-    def setAllText(self, text):
-        self.modes['Edit'].setAllText(text)
-        #self.modes['Display'].setAllText(text)
-
-    def _getints(self, string):
-        """Internal function."""
-        if string:
-            if type(string) is str:
-                textwid = self.modes['Edit'].text
-                return tuple(map(getint, textwid.tk.splitlist(string)))
-            else:
-                return string
-
-    def edit(self, *args):
-        """Internal method
-
-        This method controls the undo mechanism and
-        the modified flag. The exact behavior of the
-        command depends on the option argument that
-        follows the edit argument. The following forms
-        of the command are currently supported:
-
-        edit_modified, edit_redo, edit_reset, edit_separator
-        and edit_undo
-
-        """
-        textwid = self.modes['Edit'].text
-        return self._getints(
-            textwid.tk.call((textwid._w, 'edit') + args)) or ()
-
-    def edit_modified(self, arg=None):
-        """Get or Set the modified flag
-
-        If arg is not specified, returns the modified
-        flag of the widget. The insert, delete, edit undo and
-        edit redo commands or the user can set or clear the
-        modified flag. If boolean is specified, sets the
-        modified flag of the widget to arg.
-        """
-        return self.edit("modified", arg)
-
-    def onInfo(self):
-        text = self.getAllText()                # added on 5/3/00 in 15 mins
-        bytes = len(text)                       # words uses a simple guess:
-        lines = len(string.split(text, '\n'))   # any separated by whitespace
-        words = len(string.split(text))
-        index = self.main.text.index(INSERT)
-        where = tuple(string.split(index, '.'))
-
-        showinfo('CCG Editor Information',
-                 'Current location:\n\n' +
-                 'line:\t%s\ncolumn:\t%s\n\n' % where +
-                 'File text statistics:\n\n' +
-                 'Modified: %s\n\n' % self.edit_modified()+
-                 'bytes:\t%d\nlines:\t%d\nwords:\t%d\n' %
-                 (bytes, lines, words))
-
-    #######################
-    # Search menu commands
-    #######################
-
-    def onGoto(self):
-        line = askinteger('CCG Editor', 'Enter line number')
-        self.main.text.update()
-        self.main.text.focus()
-        if line is not None:
-            maxindex = self.main.text.index(END+'-1c')
-            maxline = atoi(split(maxindex, '.')[0])
-            if line > 0 and line <= maxline:
-                self.main.text.mark_set(INSERT, '%d.0' % line)      # goto line
-                self.main.text.tag_remove(SEL, '1.0', END)          # delete selects
-                self.main.text.tag_add(SEL, INSERT, 'insert + 1l')  # select line
-                self.main.text.see(INSERT)                          # scroll to line
-            else:
-                showerror('CCG Editor', 'Bad line number')
-
-    def onFind(self, lastkey=None):
-        key = lastkey or askstring('CCG Editor', 'Enter search string')
-        self.main.text.update()
-        self.main.text.focus()
-        self.lastfind = key
-        if key:
-            where = self.main.text.search(key, INSERT, END)  # don't wrap
-            if not where:
-                showerror('CCG Editor', 'String not found')
-            else:
-                pastkey = where + '+%dc' % len(key)          # index past key
-                self.main.text.tag_remove(SEL, '1.0', END)   # remove any sel
-                self.main.text.tag_add(SEL, where, pastkey)  # select key
-                self.main.text.mark_set(INSERT, pastkey)     # for next find
-                self.main.text.see(where)                    # scroll display
-
-    def onRefind(self):
-        self.onFind(self.lastfind)
-
-    ######################
-    # Tools menu commands
-    ######################
-
-    def onFontList(self):
-        self.fonts.append(self.fonts[0])  # pick next font in list
-        del self.fonts[0]                 # resizes the text area
-        self.modes['Edit'].text.config(font=self.fonts[0])
-        self.modes['Display'].text.config(font=self.fonts[0])
-
-    def onColorList(self):
-        self.colors.append(self.colors[0])  # pick next color in list
-        del self.colors[0]                  # move current to end
-        self.modes['Edit'].text.config(fg=self.colors[0]['fg'], bg=self.colors[0]['bg'])
-        self.modes['Display'].text.config(fg=self.colors[0]['fg'], bg=self.colors[0]['bg'])
-
-    def onPickFg(self):
-        self.pickColor('fg')
-    def onPickBg(self):
-        self.pickColor('bg')
-    def pickColor(self, part):
-        (triple, hexstr) = askcolor()
-        if hexstr:
-            apply(self.modes['Edit'].text.config, (), {part: hexstr})
-            apply(self.modes['Display'].text.config, (), {part: hexstr})
-
-#     def onRunCode(self, parallelmode=1):
-#         """
-#         run Python code being edited--not an ide, but handy;
-#         tries to run in file's dir, not cwd (may be pp2e root);
-#         inputs and adds command-line arguments for script files;
-#         code's stdin/out/err = editor's start window, if any;
-#         but parallelmode uses start to open a dos box for i/o;
-#         """
-#         from PP2E.launchmodes import System, Start, Fork
-#         filemode = 0
-#         thefile = str(self.getFileName())
-#         cmdargs = askstring('CCG Editor', 'Commandline arguments?') or ''
-#         if os.path.exists(thefile):
-#             filemode = askyesno('CCG Editor', 'Run from file?')
-#         if not filemode:                          # run text string
-#             namespace = {'__name__': '__main__'}  # run as top-level
-#             sys.argv = [thefile] + string.split(cmdargs)  # could use threads
-#             exec self.getAllText() + '\n' in namespace    # exceptions ignored
-#         elif askyesno('CCG Editor', 'Text saved in file?'):
-#             mycwd = os.getcwd()                          # cwd may be root
-#             os.chdir(os.path.dirname(thefile) or mycwd)  # cd for filenames
-#             thecmd = thefile + ' ' + cmdargs
-#             if not parallelmode:              # run as file
-#                 System(thecmd, thecmd)()      # block editor
-#             else:
-#                 if sys.platform[:3] == 'win':  # spawn in parallel
-#                     Start(thecmd, thecmd)()    # or use os.spawnv
-#                 else:
-#                     Fork(thecmd, thecmd)()     # spawn in parallel
-#             os.chdir(mycwd)
-
-    #####################
-    # File menu commands
-    #####################
-
-    def my_askopenfilename(self):  # objects remember last result dir/file
-        if not self.openDialog:
-            self.openDialog = Open(initialdir=self.startfiledir,
-                                   filetypes=self.ftypes)
-        return self.openDialog.show()
-
-    def my_asksaveasfilename(self):  # objects remember last result dir/file
-        if not self.saveDialog:
-            self.saveDialog = SaveAs(initialdir=self.startfiledir,
-                                     filetypes=self.ftypes)
-        return self.saveDialog.show()
-
-    def onOpen(self):
-        file = self.my_askopenfilename()
-        # FIXME! Only create new window if file exists and is readable
-        if file:
-            CFile(file)
-
-    def onFirstOpen(self, file):
-        try:
-            text = open(file, 'r').read()
-        except:
-            showerror('CCG Editor', 'Could not open file ' + file)
-        else:
-            self.setAllText(text)
-            self.setFileName(file)
-
-    def compile_if_needed(self):
-        # FIXME! Retrieving the entire text and comparing it is potentially
-        # expensive.  Probably a better way is to use a cryptographic hash
-        # (e.g. md5) to compare the results.  A way to do this is described
-        # in the Programming Python book.
-        text = self.getAllText()
-        if text != self.text_when_last_compiled:
-            ccg2xml.init_global_state(errors_to_string=True)
-            ccg2xml.options.quiet = True
-            self.curparse = ccg2xml.parse_string(text)
-            #ccg2xml.debug("feature_values: %s\n", self.curparse.feature_values)
-            #ccg2xml.debug("feature_to_values: %s\n", self.curparse.feature_to_values)
-            self.text_when_last_compiled = text
-
-    def onDisplay(self):
-        self.switch_to('Display')
-
-    def onEdit(self):
-        self.switch_to('Edit')
-
-    def onLexicon(self):
-        self.switch_to('Lexicon')
-
-    def onTestbed(self):
-        self.switch_to('Testbed')
-
-    def onRules(self):
-        self.switch_to('Rules')
-
-    def onFeatures(self):
-        self.switch_to('Features')
-
-    def onNew(self):
-        CFile()
-
-    def getFileName(self):
-        return self.currfile
-
-    def setFileName(self, name):
-        self.currfile = name  # for save
-        if name:
-            title = 'CCG Editor: %s' % name
-        else:
-            title = 'CCG Editor'
-        self.top.title(title)
-        self.top.iconname(title)
-
-    def help(self):
-        showinfo('Help', 'Sorry, no help for ' + self.__class__.__name__)
-
-    # Close this window; if this is the last window, quit
-    def onClose(self):
-        assert self in openfiles
-        if len(openfiles) == 1 or self.top == root:
-            self.onQuit()
-            # If we got this far, the user refused to quit, so do nothing
-        else:
-            del openfiles[self]
-            self.top.destroy()
-
-    def onQuit(self):
-        modfiles = False
-        for f in openfiles:
-            if f.edit_modified() == YES:
-                modfiles = True
-                break
-        if not modfiles or askyesno('CCG Editor', 'Files are modified, Really quit?'):
-            self.top.quit()
-
-def main():
-    ccg2xml.parse_arguments(sys.argv[1:])
-    ccg2xml.init_global_state_once()
-    if ccg2xml.global_args and len(ccg2xml.global_args) > 0:
-        # file name:
-        fname = ccg2xml.global_args[0]
-    else:
-        fname = None
-
-    CFile(fname)
-    mainloop()
-
-if __name__ == '__main__':  # when run as a script
-    main()
diff --git a/bin/ccg2xml b/bin/ccg2xml
index 2e296ab..a054eb1 100755
--- a/bin/ccg2xml
+++ b/bin/ccg2xml
@@ -1,3 +1,3 @@
 #!/bin/sh
 
-python "$OPENCCG_HOME/bin/ccg2xml.py" "$@"
+python3 "$OPENCCG_HOME/bin/ccg2xml.py" "$@"
diff --git a/bin/xml2ccg b/bin/xml2ccg
new file mode 100755
index 0000000..3a948cf
--- /dev/null
+++ b/bin/xml2ccg
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+python3 "$OPENCCG_HOME/bin/xml2ccg.py" "$@"
diff --git a/bin/xml2ccg.bat b/bin/xml2ccg.bat
new file mode 100755
index 0000000..d5553f0
--- /dev/null
+++ b/bin/xml2ccg.bat
@@ -0,0 +1,2 @@
+@echo off
+python %OPENCCG_HOME%/bin/xml2ccg.py %*
diff --git a/build.xml b/build.xml
index ea51475..98f5dea 100644
--- a/build.xml
+++ b/build.xml
@@ -33,7 +33,7 @@
-
+
@@ -57,7 +57,7 @@
-
+
@@ -81,7 +81,7 @@
-
+
@@ -93,7 +93,7 @@
-
+
+              todir="${build.gen}/opennlp/ccgbank/parse"/>
+           javacchome="${lib.dir}"
            outputdirectory="${build.gen}/opennlp/ccgbank/parse" />
+           outputdirectory="${build.gen}/opennlp/ccgbank/parse"
+           javacchome="${lib.dir}/" />
-
@@ -125,37 +124,37 @@ deprecation="${deprecation}"
            classpathref="build.classpath"
            nowarn="true"
-           includeAntRuntime="false" 
+           includeAntRuntime="false"
            excludes="**/.backup.orig/**"
-           optimize="${optimize}"/> 
+           optimize="${optimize}"/>
-
-
+
+
-
-
-
-
-
-
-
+
+
+
+
+
+
@@ -164,7 +163,7 @@
-
+
@@ -174,17 +173,17 @@
+           target="clean"/>
-
-
-
@@ -199,7 +198,7 @@
-
-
-
-
-
-
-
+
+
+
+
+
+
-
-
+
+
@@ -245,17 +244,17 @@
+           classpathref="build.classpath" debug="on" includeAntRuntime="false"/>
-
-
-
-
-
+
+
+
+
+
@@ -270,12 +269,12 @@
-
+           target="clean"/>
@@ -283,8 +282,7 @@
-
+
diff --git a/src/ccg2xml/README b/src/ccg2xml/README
old mode 100755
new mode 100644
diff --git a/src/ccg2xml/Tree.py b/src/ccg2xml/Tree.py
index 4e03f35..4254cd1 100644
--- a/src/ccg2xml/Tree.py
+++ b/src/ccg2xml/Tree.py
@@ -9,9 +9,9 @@
 # Thanks to Laurent Claustre for sending lots of helpful
 # bug reports.
 #
-# This copyright license is intended to be similar to the FreeBSD license. 
+# This copyright license is intended to be similar to the FreeBSD license.
 #
-# Copyright 1998 Gene Cash All rights reserved. 
+# Copyright 1998 Gene Cash All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
@@ -76,18 +76,18 @@
 # I didn't pass "master" properly to the Canvas superclass. Sigh.
 # One step forward, one step back.
-import Tkdnd
-from Tkinter import *
+import tkinter.dnd
+from tkinter import *
 
 #------------------------------------------------------------------------------
 def report_callback_exception():
     """report exception on sys.stderr."""
     import traceback
     import sys
-    
+
     sys.stderr.write("Exception in Tree control callback\n")
     traceback.print_exc()
-    
+
 #------------------------------------------------------------------------------
 class Struct:
     """Helper object for add_node() method"""
@@ -116,7 +116,7 @@ class Node:
 
     Please note that methods prefixed PVT_* are not meant to be used by
     client programs."""
-    
+
     def __init__(self, parent_node, id, collapsed_icon, x, y,
                  parent_widget=None, expanded_icon=None, label=None,
                  expandable_flag=0):
@@ -200,7 +200,7 @@ def next_sib(self):
             return self.parent_node.child_nodes[i]
         else:
             return None
-    
+
     def next_visible(self):
         """Return next lower visible node"""
         n=self
@@ -216,7 +216,7 @@ def next_visible(self):
             n=n.parent_node
         # we're at bottom
         return self
-    
+
     def prev_visible(self):
         """Return next higher visible node"""
         n=self
@@ -229,7 +229,7 @@ def next_visible(self):
             return j.PVT_last()
         else:
             return n
-    
+
     def children(self):
         """Return list of node's children"""
         return self.child_nodes[:]
@@ -249,7 +249,7 @@ def expanded(self):
     def expandable(self):
         """Returns true if node can be expanded (i.e. if it's a folder)"""
         return self.expandable_flag
-    
+
     def full_id(self):
        """Return list of IDs of all parents and node ID"""
         if self.parent_node:
@@ -261,7 +261,7 @@ def expand(self):
         """Expand node if possible"""
         if not self.expanded_flag:
             self.PVT_set_state(1)
-    
+
     def collapse(self):
         """Collapse node if possible"""
         if self.expanded_flag:
@@ -273,7 +273,7 @@ def delete(self, me_too=1):
         sw=self.widget
         if not self.parent_node and me_too:
             # can't delete the root node
-            raise ValueError, "can't delete root node"
+            raise ValueError("can't delete root node")
         self.PVT_delete_subtree()
         # move everything up so that distance to next subnode is correct
         n=self.next_visible()
@@ -307,18 +307,18 @@ def insert_before(self, nodes):
         node's add_node() function to generate the list of nodes."""
         i=self.parent_node.child_nodes.index(self)
         self.parent_node.PVT_insert(nodes, i, self.prev_visible())
-    
+
     def insert_after(self, nodes):
         """Insert list of nodes as siblings after this node.  Call parent
         node's add_node() function to generate the list of nodes."""
         i=self.parent_node.child_nodes.index(self)+1
         self.parent_node.PVT_insert(nodes, i, self.PVT_last())
-    
+
     def insert_children(self, nodes):
         """Insert list of nodes as children of this node.  Call node's
         add_node() function to generate the list of nodes."""
         self.PVT_insert(nodes, 0, self)
-    
+
     def toggle_state(self):
         """Toggle node's state between expanded and collapsed, if possible"""
         if self.expandable_flag:
@@ -326,12 +326,12 @@ def toggle_state(self):
             self.PVT_set_state(0)
         else:
             self.PVT_set_state(1)
-    
+
     # ----- functions for drag'n'drop support -----
     def PVT_enter(self, event):
         """detect mouse hover for drag'n'drop"""
         self.widget.target=self
-    
+
     def dnd_end(self, target, event):
         """Notification that dnd processing has been ended.
        It DOES NOT imply that we've been dropped somewhere useful,
        we could have just been
@@ -351,7 +351,7 @@ def PVT_last(self):
         while n.child_nodes:
             n=n.child_nodes[-1]
         return n
-    
+
     def PVT_find(self, search):
         """Used by searching functions"""
         if self.id != search[0]:
@@ -360,7 +360,7 @@ def PVT_find(self, search):
         if len(search) == 1:
             return self
         # get list of children IDs
-        i=map(lambda x: x.id, self.child_nodes)
+        i=[x.id for x in self.child_nodes]
         # if there is a child that matches, search it
         try:
             return self.child_nodes[i.index(search[1])].PVT_find(search[1:])
@@ -373,7 +373,7 @@ def PVT_insert(self, nodes, pos, below):
         the new nodes are inserted.  "below" is node which new children
         should appear immediately below."""
         if not self.expandable_flag:
-            raise TypeError, 'not an expandable node'
+            raise TypeError('not an expandable node')
         # for speed
         sw=self.widget
         # expand and insert children
@@ -409,7 +409,7 @@ def PVT_insert(self, nodes, pos, below):
         self.PVT_cleanup_lines()
         self.PVT_update_scrollregion()
         sw.move_cursor(sw.pos)
-    
+
     def PVT_set_state(self, state):
         """Common code forexpanding/collapsing folders. It's not re-entrant,
         and there are certain cases in which we can be called again before
@@ -477,7 +477,7 @@ def PVT_delete_subtree(self):
         sw.move_cursor(self)
         # now subnodes will be properly garbage collected
         self.child_nodes=[]
-    
+
     def PVT_unbind_all(self):
         """Unbind callbacks so node gets garbage-collected. This wasn't easy
         to figure out the proper way to do this.  See also tag_bind() for the
@@ -495,7 +495,7 @@ def PVT_tag_move(self, dist):
         bbox1=self.widget.bbox(self.widget.root.symbol, self.label)
         bbox2=self.widget.bbox('all')
         self.widget.dtag('move')
-        self.widget.addtag('move', 'overlapping', 
+        self.widget.addtag('move', 'overlapping',
                            bbox2[0], bbox1[3], bbox2[2], bbox2[3])
         # untag cursor & node so they don't get moved too
         self.widget.dtag(self.widget.cursor_box, 'move')
@@ -503,12 +503,12 @@ def PVT_tag_move(self, dist):
         self.widget.dtag(self.label, 'move')
         # now do the move of all the tagged objects
         self.widget.move('move', 0, dist)
-    
+
     def PVT_click(self, event):
         """Handle mouse clicks by kicking off possible drag'n'drop
         processing"""
         if self.widget.drop_callback:
-            if Tkdnd.dnd_start(self, event):
+            if tkinter.dnd.dnd_start(self, event):
                 x1, y1, x2, y2=self.widget.bbox(self.symbol)
                 self.x_off=(x1-x2)/2
                 self.y_off=(y1-y2)/2
@@ -528,7 +528,7 @@ def __init__(self, master, root_id, root_label='',
                  *args, **kw_args):
         # pass args to superclass (new idiom from Python 2.2)
         Canvas.__init__(self, master, *args, **kw_args)
-        
+
         # this allows to subclass Node and pass our class in
         self.node_class=node_class
         # keep track of node bindings
@@ -590,7 +590,7 @@ def __init__(self, master, root_id, root_label='',
                                   expanded_icon=self.expanded_icon,
                                   x=dist_x, y=dist_y, parent_widget=self)
         # configure for scrollbar(s)
-        x1, y1, x2, y2=self.bbox('all') 
+        x1, y1, x2, y2=self.bbox('all')
         self.configure(scrollregion=(x1, y1, x2+5, y2+5))
         # add a cursor
         self.cursor_box=self.create_rectangle(0, 0, 0, 0)
@@ -620,15 +620,15 @@ def __init__(self, master, root_id, root_label='',
     def PVT_mousefocus(self, event):
         """Soak up event argument when moused-over"""
         self.focus_set()
-    
+
     # ----- PUBLIC METHODS -----
     def tag_bind(self, tag, seq, *args, **kw_args):
         """Keep track of callback bindings so we can delete them later.
        I shouldn't have to do this!!!!"""
         # pass args to superclass
-        func_id=apply(Canvas.tag_bind, (self, tag, seq)+args, kw_args)
+        func_id = Canvas.tag_bind(self, tag, seq, *args, **kw_args)
         # save references
-        self.bindings[tag]=self.bindings.get(tag, [])+[(seq, func_id)]
+        self.bindings[tag] = self.bindings.get(tag, [])+[(seq, func_id)]
 
     def add_list(self, list=None, name=None, id=None, flag=0,
                  expanded_icon=None, collapsed_icon=None):
@@ -668,22 +668,22 @@ def add_node(self, name=None, id=None, flag=0, expanded_icon=None,
     def find_full_id(self, search):
         """Search for a node"""
         return self.root.PVT_find(search)
-    
+
     def cursor_node(self, search):
         """Return node under cursor"""
         return self.pos
-    
+
     def see(self, *items):
         """Scroll (in a series of nudges) so items are visible"""
-        x1, y1, x2, y2=apply(self.bbox, items)
-        while x2 > self.canvasx(0)+self.winfo_width():
-            old=self.canvasx(0)
+        x1, y1, x2, y2 = self.bbox(*items)
+        while x2 > self.canvasx(0) + self.winfo_width():
+            old = self.canvasx(0)
             self.xview('scroll', 1, 'units')
             # avoid endless loop if we can't scroll
             if old == self.canvasx(0):
                 break
-        while y2 > self.canvasy(0)+self.winfo_height():
-            old=self.canvasy(0)
+        while y2 > self.canvasy(0) + self.winfo_height():
+            old = self.canvasy(0)
             self.yview('scroll', 1, 'units')
             if old == self.canvasy(0):
                 break
@@ -698,14 +698,14 @@ def see(self, *items):
             self.yview('scroll', -1, 'units')
             if old == self.canvasy(0):
                 break
-    
+
     def move_cursor(self, node):
         """Move cursor to node"""
         self.pos=node
         x1, y1, x2, y2=self.bbox(node.symbol, node.label)
         self.coords(self.cursor_box, x1-1, y1-1, x2+1, y2+1)
         self.see(node.symbol, node.label)
-    
+
     def toggle(self, event=None):
         """Expand/collapse subtree"""
         self.pos.toggle_state()
@@ -713,7 +713,7 @@ def toggle(self, event=None):
     def next(self, event=None):
         """Move to next lower visible node"""
         self.move_cursor(self.pos.next_visible())
-    
+
     def prev(self, event=None):
         """Move to next higher visible node"""
         self.move_cursor(self.pos.prev_visible())
@@ -733,7 +733,7 @@ def descend(self, event=None):
             self.move_cursor(self.pos.child_nodes[0])
             return
         # if no subnodes, move to next sibling
         self.next()
 
     def first(self, event=None):
         """Go to root node"""
@@ -762,7 +762,7 @@ def pagedown(self, event=None):
             n=n.next_visible()
         self.yview('scroll', 1, 'pages')
         self.move_cursor(n)
-    
+
     # ----- functions for drag'n'drop support -----
     def where(self, event):
         """Determine drag location in canvas coordinates.  event.x & event.y
@@ -775,7 +775,7 @@ def where(self, event):
         x=self.canvasx(event.x_root-x_org)
         y=self.canvasy(event.y_root-y_org)
         return x, y
-    
+
     def dnd_accept(self, source, event):
         """Accept dnd messages, i.e.
        we're a legit drop target, and we do implement d&d functions."""
@@ -797,7 +797,7 @@ def dnd_enter(self, source, event):
         else:
             self.dnd_symbol=self.create_image(x, y,
                                               image=source.collapsed_icon)
-        self.dnd_label=self.create_text(x+self.text_offset, y, 
+        self.dnd_label=self.create_text(x+self.text_offset, y,
                                         text=source.get_label(),
                                         justify='left',
                                         anchor='w')
@@ -847,11 +847,11 @@ def dnd_commit(self, source, event):
 # argument is the node object being expanded
 # should call add_node()
 def get_contents(node):
-    path=apply(os.path.join, node.full_id())
+    path = os.path.join(*node.full_id())
     for filename in os.listdir(path):
-        full=os.path.join(path, filename)
-        name=filename
-        folder=0
+        full = os.path.join(path, filename)
+        name = filename
+        folder = 0
         if os.path.isdir(full):
             # it's a directory
             folder=1
@@ -904,5 +904,5 @@ def get_contents(node):
     # expand out the root
     t.root.expand()
-    
+
     root.mainloop()
diff --git a/src/ccg2xml/arabic.ccg b/src/ccg2xml/arabic.ccg
old mode 100755
new mode 100644
diff --git a/src/ccg2xml/build.xml b/src/ccg2xml/build.xml
index f6a312b..c6e7248 100644
--- a/src/ccg2xml/build.xml
+++ b/src/ccg2xml/build.xml
@@ -1,40 +1,41 @@
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
-
+
+          property="convert-ply.py.present"/>
+          message="convert-ply.py is not present"/>
-
+
-
+
-
+
@@ -46,7 +47,7 @@
-
+
@@ -54,7 +55,7 @@
-
+
@@ -65,15 +66,24 @@
+
+
+
+
+
+
+
+
-
+
+
-
+
@@ -81,6 +91,7 @@
+
@@ -88,4 +99,3 @@
-
diff --git a/src/ccg2xml/ccg.ply b/src/ccg2xml/ccg.ply
old mode 100755
new mode 100644
index 106c0ba..a47f855
--- a/src/ccg2xml/ccg.ply
+++ b/src/ccg2xml/ccg.ply
@@ -21,14 +21,15 @@ import re
 import optparse
 import copy
 import os
-import cStringIO
+import io
 
 # Local imports
 import lex, yacc
-from Tkinter import *
-from tkMessageBox import *
-import tkFont
+from tkinter import *
+from tkinter.messagebox import *
+import tkinter.font as tkFont
+from functools import reduce
 
 ## Get options
@@ -193,13 +194,13 @@ def getoptprop(prop, proplist, default=None):
 
 # Replace value of property PROP with VALUE in PROPLIST.
 def putprop(prop, value, proplist):
-    for i in xrange(len(proplist)):
+    for i in range(len(proplist)):
         if proplist[i][0] == prop:
             proplist[i] = (prop, value)
             return
     else:
         proplist += [(prop, value)]
-    
+
 # Replace property named PROP with NEW in PROPLIST.  Often this is called with
 # with PROP equal to None; the None occurs when a PROP=VALUE clause is expected
@@ -209,7 +210,7 @@ def putprop(prop, value, proplist):
 # The surrounding code calls property_name_replace() to fill in the proper name.
 def property_name_replace(prop, new, proplist):
-    for i in xrange(len(proplist)):
+    for i in range(len(proplist)):
         if proplist[i][0] == prop:
             proplist[i] = (new, proplist[i][1])
 
@@ -227,8 +228,8 @@ def init_errors(errors_to_string):
     global stdout_file, stderr_file
 
     if errors_to_string:
-        stdout_file = cStringIO.StringIO()
-        stderr_file = cStringIO.StringIO()
+        stdout_file = io.StringIO()
+        stderr_file = io.StringIO()
     else:
         stdout_file = sys.stdout
         stderr_file = sys.stderr
@@ -242,7 +243,7 @@ def save_errors(cur):
     cur.stdout_file = stdout_file
     cur.stderr_file = stderr_file
 
-class InternalError(StandardError):
+class InternalError(BaseException):
     pass
 
 def argformat(format, arg):
@@ -259,7 +260,7 @@ def synerr(format, *arg):
 # output at the same time, so the two will stay in sync.
 def maybe_errout(str):
     # Force display of error
-    # FIXME: Maybe we could dump all errors into a single 
+    # FIXME: Maybe we could dump all errors into a single
     # window display and show the messages together
     #showerror('Message', str)
 
@@ -291,8 +292,6 @@ def error(lineno, format, *arg):
 
 # Write formatted arguments to stderr, with Warning: printed.
 def warning(lineno, format, *arg):
-    global warning_count
-    warning_count += 1
     error_or_warning('Warning', lineno, format, *arg)
 
 # Write formatted arguments to stdout.
@@ -382,12 +381,6 @@ class CSBlock(CSNode):
 def isalnumund(str):
     return str.isalnum() or str in '_+-'
 
-# Prior to Python 2.4, no sorted()
-def my_sorted(lyst):
-    lystcopy = list(lyst)
-    lystcopy.sort()
-    return lystcopy
-
 ########################################################################
 #                              Tokenizing                              #
 ########################################################################
@@ -500,7 +493,7 @@ def t_MAGIC_ID(t):
     r'''(\[\*[^*]+\*\])'''
     t.type = 'MAGIC_ID'
     return t
-#t_MAGIC_WORD.func_doc = '(\[\*(' + '|'.join(magic_names) + ')\*\])'
+#t_MAGIC_WORD.__doc__ = '(\[\*(' + '|'.join(magic_names) + ')\*\])'
 
 t_ignore = " \t\r"
 
@@ -545,7 +538,7 @@ def t_comment(t):
 def t_error(t):
     error(t.lineno, "Illegal character '%s'", t.value[0])
     t.skip(1)
-    
+
 def init_lexer():
     # This is a signal to us to go into "line mode", where we return a
     # newline as a token and treat backslash at the end of a line as a line
@@ -570,20 +563,20 @@ def p_word(p):
     'FILLED IN BELOW'
     p[0] = p[1]
 # fill in the documentation (i.e. the cfg rule)
-p_word.func_doc = 'word : ' + '\n| '.join(word_tokens)
+p_word.__doc__ = 'word : ' + '\n| '.join(word_tokens)
 
 # hack, to deal with a reduce/reduce conflict
 def p_word_except_x(p):
     'FILLED IN BELOW'
     p[0] = p[1]
 # fill in the documentation (i.e. the cfg rule)
-p_word_except_x.func_doc = 'word_except_x : ' + '\n| '.join(word_no_x_tokens)
+p_word_except_x.__doc__ = 'word_except_x : ' + '\n| '.join(word_no_x_tokens)
 
 def p_word_no_numbers(p):
     'FILLED IN BELOW'
     p[0] = p[1]
 # fill in the documentation (i.e. the cfg rule)
-p_word_no_numbers.func_doc = (
+p_word_no_numbers.__doc__ = (
     'word_no_numbers : ' + '\n| '.join(word_no_number_tokens))
 
 %y
@@ -728,15 +721,15 @@ def ifmatch_nocase(regex, string, doif, doelse):
         return doelse
 
 def print_macros():
-    for (key, value) in macro_defs.iteritems():
-        print "Macro: %s(%s): %s" % (key, value.args, value.text)
+    for (key, value) in list(macro_defs.items()):
+        print("Macro: %s(%s): %s" % (key, value.args, value.text))
 
 # Given some text, expand the macros in it, recursively (i.e. apply
 # any macros, then apply macros to the resulting text, etc.).  After
 # that, combine text that has the . operator applied to it.
 def macroexpand_text(text):
     if super_macro_debug:
-        print "Text before expanding: %s" % arg_to_text(text)
+        print("Text before expanding: %s" % arg_to_text(text))
     # Now recursively expand macros.  The code to actually check for
     # macros is in MacroLexer.
     lexer = MacroLexer(None)
@@ -744,14 +737,14 @@ def macroexpand_text(text):
     newtext = []
     while True:
         tok = lexer.token()
-        #print "Reading token: %s" % tok
+        # print("Reading token: %s" % tok)
         if not tok:
             break
         newtext.append(tok)
     text = newtext
     l = len(text)
     if super_macro_debug:
-        print "Text after expanding: %s" % arg_to_text(text)
+        print("Text after expanding: %s" % arg_to_text(text))
     # Now directly handle instances with the '.' operator, so that
     # the operator can be used to create new macro calls
     x = 1
@@ -789,8 +782,8 @@ def macrosub(macdef, args, lineno):
     else:
         # Otherwise, make a copy of the text and substitute the arguments
         # into it.
-        text = text[:]
-        args = dict(zip(macdef.args, args))
+        text = copy.deepcopy(text)
+        args = dict(list(zip(macdef.args, args)))
         l = len(text)
         x = 0
         while x < l:
@@ -874,7 +867,7 @@ def p_bracemacro_text_entry_other(p):
     'FILLED IN BELOW'
     p[0] = [p.slice[1]]
 # fill in the documentation (i.e. the cfg rule)
-p_bracemacro_text_entry_other.func_doc = (
+p_bracemacro_text_entry_other.__doc__ = (
     'bracemacro_text_entry : ' + '\n| '.join(other_tokens + word_tokens)
     )
 
@@ -900,10 +893,10 @@ linemacro_next_tokens = [x for x in tokens if x != 'NEWLINE']
 
 # fill in the documentation (i.e. the cfg rule)
-p_linemacro_begin.func_doc = (
+p_linemacro_begin.__doc__ = (
     'linemacro_begin : ' + '\n| '.join(linemacro_begin_tokens)
     )
-p_linemacro_next.func_doc = (
+p_linemacro_next.__doc__ = (
     'linemacro_next : ' + '\n| '.join(linemacro_next_tokens)
     )
 
@@ -1012,7 +1005,7 @@ def install_feature(feature, lis, lineno):
                 synerr("Cycle seen involving feature value %s", node.name)
             for x in node.parents:
                 check_cycles(x, list + [node])
-    
+
     # Check for cycles.
     for x in lis:
         check_cycles(x, [])
@@ -1025,7 +1018,7 @@ def install_feature(feature, lis, lineno):
             if check_reachable(x, orignode):
                 return True
         return False
-    
+
     # Clean excess parents.
     for x in lis:
         newpar = []
@@ -1043,10 +1036,10 @@ def install_feature(feature, lis, lineno):
 # Return XML to go in types.xml.
 def make_feature_types_xml():
     xml = []
-    for (x, featvals) in feature_to_values.iteritems():
+    for (x, featvals) in list(feature_to_values.items()):
         # FIXME! Figure out what's going wrong here.
 #        typename = x
-#        print "fv_names_to_values: %s" % fv_names_to_values
+#        print("fv_names_to_values: %s" % fv_names_to_values)
 #        if x in fv_names_to_values:
 #            typename = fv_names_to_values[x]
 #        xml += [['type', [('name', typename)]]]
@@ -1063,7 +1056,7 @@ def make_feature_types_xml():
 # Return XML to go in morph.xml.
 def make_feature_morph_xml():
     xml = []
-    for x in my_sorted(feature_values):
+    for x in sorted(feature_values):
         featval = feature_values[x]
         if featval.macrotie:
             entry = ['macro', [('name', '@%s' % x)]]
@@ -1271,14 +1264,14 @@ slash : bareslash : $$ = makeslash($1, None, None)
 #############################
 # Example:
-# 
+#
 # Source:
-# 
+#
 #   s<1>[E] \ np<2>[X nom] / np<3>[Y acc]
 #
 #
 # XML output:
-# 
+#
 #
 #
 #
@@ -1340,7 +1333,7 @@ slash : bareslash : $$ = makeslash($1, None, None)
 #     fs?
 #     lf?
 # }
-    
+
 # fs(id) {
 #   (feat(attr='index') {
 #      lf { nomvar(name) } }
@@ -1350,8 +1343,8 @@ slash : bareslash : $$ = makeslash($1, None, None)
 
 # slash(dir=('/', '\\', '|'), mode=('.', '*', '^', 'x', 'x<', ''),
 #       varmodality, ability=('inert', 'active'))
-
-# fs(id): 
+
+# fs(id):
 #   ...
 
 %p
@@ -1489,7 +1482,7 @@ def create_tags(text):
         text.tag_config(x, foreground=fg)
         if bg:
             text.tag_config(x, background=bg)
-    
+
 # A "draw-into" object, used for incrementally building up some text
 # in various fonts.  Initialized with a parent widget and some initial text.
@@ -1502,18 +1495,18 @@ class draw_into(object):
                         borderwidth=0, relief=FLAT,
                         background='white')
         self.curface = None
-	self.wid.slash_image = []
+        self.wid.slash_image = []
         self.curtext = ''
         create_tags(self.wid)
 
-	# Self.alltext maintains the length of the text printed
-	# for the current widget
-	self.alltext = 0
+        # Self.alltext maintains the length of the text printed
+        # for the current widget
+        self.alltext = 0
 
-	# FIXME: the tirgger for bigger height of the Text
-	# widget is arbitrarily set to 95. This should be
-	# driven by width of individual fonts and chars
-	self.expandTrigger = 95
+        # FIXME: the trigger for bigger height of the Text
+        # widget is arbitrarily set to 95. This should be
+        # driven by width of individual fonts and chars
+        self.expandTrigger = 95
 
     def finish_run(self):
         if self.curtext:
@@ -1522,6 +1515,7 @@ class draw_into(object):
             #Label(self.wid, text=self.curtext,
             #      font=props['font']).pack(side=LEFT)
             self.curtext = ''
+
     def text(self, tex, face='default'):
         if self.curface == face:
             self.curtext += tex
@@ -1530,39 +1524,39 @@ class draw_into(object):
             self.curtext = tex
             self.curface = face
 
-	# Increase recorded length of text
-	self.alltext += len(tex)
-
-	# Increase height if necessary
-	if (self.alltext > self.expandTrigger):
-	    heightval = 3* (self.alltext/self.expandTrigger +1)
-	    self.wid.config(height= heightval)
+        # Increase recorded length of text
+        self.alltext += len(tex)
 
+        # Increase height if necessary
+        if (self.alltext > self.expandTrigger):
+            heightval = 3* (self.alltext/self.expandTrigger +1)
+            self.wid.config(height= heightval)
 
     def finish(self):
         self.finish_run()
         self.wid.config(state=DISABLED)
         return self.wid
+
     def image(self, img):
-	# When there is an image to be embedded
-	self.finish_run()
-	# Access the OPENCCG_HOME environment variable
-	# to determine the correct path for the images
-	openccg_home = os.environ['OPENCCG_HOME']
-	gifdir = openccg_home+"/images/slashes/"
-	image = PhotoImage(file=gifdir+img)
-	# We are creating an instantiated variable here
-	# for the image, because the actual photo object is destroyed once
-	# the execution leaves the __init__ code. Without building it this way,
-	# the display was showing only a space for the image but not the image itself
-	self.wid.slash_image += [image]
-	self.wid.image_create(INSERT, image=image)
+        # When there is an image to be embedded
+        self.finish_run()
+        # Access the OPENCCG_HOME environment variable
+        # to determine the correct path for the images
+        openccg_home = os.environ['OPENCCG_HOME']
+        gifdir = openccg_home+"/images/slashes/"
+        image = PhotoImage(file=gifdir+img)
+        # We are creating an instantiated variable here
+        # for the image, because the actual photo object is destroyed once
+        # the execution leaves the __init__ code. Without building it this way,
+        # the display was showing only a space for the image but not the image itself
+        self.wid.slash_image += [image]
+        self.wid.image_create(INSERT, image=image)
 
     def onHilite(self):
-	self.wid.config(bg = '#E9FFE3')
+        self.wid.config(bg = '#E9FFE3')
 
     def offHilite(self):
-	self.wid.config(bg = 'white')
+        self.wid.config(bg = 'white')
 
 def category_draw_children(into, chils, depth, vars,
                            need_initial_comma=False, sep='', sepface='default'):
@@ -1626,52 +1620,48 @@ def category_draw(into, xml, depth, vars):
         dir = getoptprop('dir', props, '|')
         mode = getoptprop('mode', props)
         ability = getoptprop('ability', props)
-#        into.text('%s' % dir, 'slash')
-#        into.text('%s%s' % (mode or '',
-#                            ability_to_ability_value[ability] or ''),
-#                  'slash mode')
-
-	# We create the file name here
-	# By interpreting various parameters
-	# and joiing them together as a string
-
-	if dir == '\\':
-	    slash_string = 'bk'
-	elif dir == '/':
-	    slash_string = 'fd'
-	else:
-	    slash_string = 'str'
-
-	#slash_mode : X GREATER : $$ = 'x>'
-	#           : LESS X : $$ = '<x'
-	modelist = {'x>':'cross_greater',
-		    '>':'greater',
-		    '<':'lesser',
-		    'x':'cross',
-		    '.':'dot',
-		    '*':'star',
-		    '^':'box'}
-	if mode == None:
-	    image_string = slash_string + '.GIF'
-	else:
-	    image_string = slash_string+ '_' + modelist[mode] + '.GIF'
-
-	into.image(image_string)
+
+        # We create the file name here
+        # by interpreting various parameters
+        # and joining them together as a string
+
+        if dir == '\\':
+            slash_string = 'bk'
+        elif dir == '/':
+            slash_string = 'fd'
+        else:
+            slash_string = 'str'
+
+        # slash_mode : X GREATER : $$ = 'x>'
+        #            : LESS X : $$ = '<x'
+        modelist = {'x>':'cross_greater',
+                    '>':'greater',
+                    '<':'lesser',
+                    'x':'cross',
+                    '.':'dot',
+                    '*':'star',
+                    '^':'box'}
+        if mode == None:
+            image_string = slash_string + '.GIF'
+        else:
+            image_string = slash_string+ '_' + modelist[mode] + '.GIF'
+
+        into.image(image_string)
     elif ty == 'dollar':
         name = getoptprop('name', props)
         into.text('$', 'dollar')
         into.text('%s' % name, 'numeric index')
     else:
-	# Have commented the following assert Statement
-	# and the debug statement
-	# Because of validation errors
         #debug('ty??? %s\n' % ty)
         #assert False
-	dummy = 1
+        # Have commented the following assert statement
+        # and the debug statement
+        # because of validation errors
+        #debug('ty??? %s\n' % ty)
+        #assert False
+        pass
 
 %y
@@ -1709,13 +1699,13 @@ complexcat : complexcat LBRACE cat_set_entry_0+ RBRACE :
 #############################
 # Example:
-# 
+#
 # Source:
-# 
+#
 #   E:action(* X:animate-being Y:sem-obj)
-# 
+#
 # XML output:
-# 
+#
 #
 #
 #
@@ -1881,13 +1871,14 @@ def make_word_morph_xml():
         elif y in pos_hash:
             pos = y
         else:
-            error(None, 'Family/part-of-speech %s not found (word declaration %s)',
+            error(None, 'Family/part-of-speech "%s" not found (word declaration "%s")',
                   y, word)
+            raise ValueError('Family/part-of-speech "%s" not found (word declaration "%s")' % (y, word))
         if pos not in word_pos_list:
             word_pos_list += [pos]
     for y in word_pos_list:
         # Make a copy of the word's XML and set the POS appropriately.
@@ -1992,17 +1983,17 @@ class CSFamily(CSBlock):
         self.name = name
         self.props = props
         self.statements = statements
-        self.text = None
-        self.homeButton = None
-        self.btnFrame = None
-        self.menuHolder = None
-
-        self.childFrame = None
-        self.cfile = None
-        self.cf = None
-        self.vars = None
-        self.canvas = None
-        self.mainFrame = None
+        self.text = None
+        self.homeButton = None
+        self.btnFrame = None
+        self.menuHolder = None
+
+        self.childFrame = None
+        self.cfile = None
+        self.cf = None
+        self.vars = None
+        self.canvas = None
+        self.mainFrame = None
 
     def draw(self, childFrame, cfile, vars, row, canvas, mainFrame):
         # Draw the family name
@@ -2010,19 +2001,19 @@ class CSFamily(CSBlock):
         cf = draw_into(f, width=20)
         cf.text('%s' % self.name, 'family name')
 
-        child_widget=cf.finish()
-        self.menuHolder = child_widget
+        child_widget = cf.finish()
+        self.menuHolder = child_widget
         child_widget.pack(fill=BOTH, expand=YES)
-        child_widget.bind("<Button-3>", self.editPopup)
+        child_widget.bind("<Button-3>", self.editPopup)
 
-        self.childFrame = childFrame
-        self.cfile = cfile
-        self.cf = cf
-        self.vars = vars
-        self.canvas = canvas
-        self.mainFrame = mainFrame
+        self.childFrame = childFrame
+        self.cfile = cfile
+        self.cf = cf
+        self.vars = vars
+        self.canvas = canvas
+        self.mainFrame = mainFrame
 
         f.grid(row=row, column=0, sticky=NSEW)
 
@@ -2039,130 +2030,134 @@ class CSFamily(CSBlock):
 
     # Define the binding procedure for the right-click for editing an entry
    def editPopup(self, event):
-        popup = Menu(self.menuHolder, tearoff =0)
-        popup.add_command(label=' Edit ', command = lambda: self.editSection(self.childFrame,
-                                                                             self.cfile,
-                                                                             self.cf,
-                                                                             self.vars,
-                                                                             self.canvas,
-                                                                             self.mainFrame))
-        try:
-            popup.tk_popup(event.x_root+40, event.y_root, 0)
-        finally:
-            popup.grab_release()
-
-        # Now bind the right-click to the saveSection buttons
-        self.menuHolder.bind("<Button-3>", self.savePopup)
+        popup = Menu(self.menuHolder, tearoff=0)
+        popup.add_command(label=' Edit ', command=lambda: self.editSection(self.childFrame,
+                                                                           self.cfile,
+                                                                           self.cf,
+                                                                           self.vars,
+                                                                           self.canvas,
+                                                                           self.mainFrame))
+        try:
+            popup.tk_popup(event.x_root + 40, event.y_root, 0)
+        finally:
+            popup.grab_release()
+
+        # Now bind the right-click to the saveSection buttons
+        self.menuHolder.bind("<Button-3>", self.savePopup)
 
     # Define the right click binding for the save entry
     def savePopup(self, event):
-        popup = Menu(self.menuHolder, tearoff = 0)
-        popup.add_command(label = 'Done', command = lambda: self.saveSection(self.childFrame,
-                                                                             self.cfile,
-                                                                             self.cf,
-                                                                             self.vars,
-                                                                             self.canvas,
-                                                                             self.mainFrame))
-        popup.add_command(label = 'Home', command = lambda: self.editHome(self.cfile))
-
-        fileData = self.cfile.getAllText()
-        popup.add_command(label = 'Undo All', command = lambda: self.undoEdit(fileData, self.cfile))
-
-        try:
-            popup.tk_popup (event.x_root+40, event.y_root, 0)
-        finally:
-            popup.grab_release()
-
+        popup = Menu(self.menuHolder, tearoff=0)
+        popup.add_command(label='Done', command=lambda: self.saveSection(self.childFrame,
+                                                                         self.cfile,
+                                                                         self.cf,
+                                                                         self.vars,
+                                                                         self.canvas,
+                                                                         self.mainFrame))
+        popup.add_command(label='Home', command=lambda: self.editHome(self.cfile))
+
+        fileData = self.cfile.getAllText()
+        popup.add_command(label='Undo All', command=lambda: self.undoEdit(fileData, self.cfile))
+
+        try:
+            popup.tk_popup(event.x_root + 40, event.y_root, 0)
+        finally:
+            popup.grab_release()
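
The editPopup/savePopup pair above follows the standard Tk context-menu idiom: build a transient Menu on right-click, post it at the pointer, and always release the pointer grab. A standalone sketch (Python 3 tkinter; widget names are hypothetical):

import tkinter as tk

root = tk.Tk()
holder = tk.Label(root, text='right-click me')
holder.pack()

def edit_popup(event):
    popup = tk.Menu(holder, tearoff=0)
    popup.add_command(label=' Edit ', command=lambda: print('edit'))
    try:
        popup.tk_popup(event.x_root + 40, event.y_root, 0)
    finally:
        # Without this, a failed post can leave the pointer grabbed
        # and the whole application unresponsive.
        popup.grab_release()

holder.bind('<Button-3>', edit_popup)
root.mainloop()
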
     # Edit a section, i.e. a family of the grammar, individually rather than
     # the entire grammar. Note that this has only very preliminary editing
     # capabilities; complete grammar editing should be done through the
     # global Edit view.
     def editSection(self, childFrame, cfile, hiliteText, vars, canvas, mainFrame):
         editFrame = Frame(mainFrame, bd=1, background='white')
-        self.text = Text(editFrame, padx=5, wrap=None, undo = YES, background='white', height =10)
-        vbar = Scrollbar(editFrame)
-        hbar = Scrollbar(editFrame, orient='horizontal')
+        self.text = Text(editFrame, padx=5, wrap=None, undo=YES, background='white', height=10)
+        vbar = Scrollbar(editFrame)
+        hbar = Scrollbar(editFrame, orient='horizontal')
 
-        self.text.config(yscrollcommand=vbar.set)  # call vbar.set on text move
+        self.text.config(yscrollcommand=vbar.set)  # call vbar.set on text move
         self.text.config(xscrollcommand=hbar.set)
         vbar.config(command=self.text.yview)       # call text.yview on scroll move
         hbar.config(command=self.text.xview)       # or hbar['command']=text.xview
 
-        # Changing the mode of the cfile object here,
-        # so that once the uer clicks done,
-        # the whole object is recompiled and redisplayed
-        cfile.mode= 'Edit'
-
-        # Highlight the row being edited
-        hiliteText.onHilite()
-
-        vbar.pack(side=RIGHT, fill=Y)
-        hbar.pack(side=BOTTOM, fill=X)
-        self.text.pack(fill= BOTH, expand= YES)
-
-        # Set a mark at the beginning of the text
-        self.text.mark_set("START", INSERT)
-        self.text.mark_gravity("START", LEFT)
-
-        # Push in the rest of the file's contents
-        fileData = cfile.getAllText()
-        self.text.insert(INSERT, fileData)
-
-        # Move the insert position to the first occurence of the family name
-        # FIXME: this is poor implementation
-        # The positioning of the insert cursor should be happening by parsing the
-        # CFG production rules, using CSFamily.prod.lineno and endlineno
-        self.text.config(takefocus=True)
-        idx= self.text.search('family '+ self.name, "START")
-        self.text.mark_set(CURRENT, idx)
-        self.text.see(CURRENT)
-
-        #editFrame.grid(row=row+1, columnspan =3, sticky = NSEW)
+        # Change the mode of the cfile object here,
+        # so that once the user clicks Done,
+        # the whole object is recompiled and redisplayed
+        cfile.mode = 'Edit'
+
+        # Highlight the row being edited
+        hiliteText.onHilite()
+
+        vbar.pack(side=RIGHT, fill=Y)
+        hbar.pack(side=BOTTOM, fill=X)
+        self.text.pack(fill=BOTH, expand=YES)
+
+        # Set a mark at the beginning of the text
+        self.text.mark_set("START", INSERT)
+        self.text.mark_gravity("START", LEFT)
+
+        # Push in the rest of the file's contents
+        fileData = cfile.getAllText()
+        self.text.insert(INSERT, fileData)
+
+        # Move the insert position to the first occurrence of the family name.
+        # FIXME: this is a poor implementation. The insert cursor should be
+        # positioned by parsing the CFG production rules, using
+        # CSFamily.prod.lineno and endlineno.
+        self.text.config(takefocus=True)
+        idx = self.text.search('family ' + self.name, "START")
+        idx = idx or self.text.search("family '" + self.name + "'", "START")
+        idx = idx or self.text.search('family "' + self.name + '"', "START")
+        self.text.mark_set(CURRENT, idx)
+        self.text.see(CURRENT)
+
+        # editFrame.grid(row=row+1, columnspan =3, sticky = NSEW)
         editFrame.grid(row=2, columnspan =2, sticky = NSEW)
-        childFrame.update_idletasks()
-        canvas.config(scrollregion=canvas.bbox("all"))
+        childFrame.update_idletasks()
+        canvas.config(scrollregion=canvas.bbox("all"))
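
editSection locates the family's source text with the Text-widget search idiom used throughout this file: search from a named mark, fall back to alternative spellings, then move a mark to the hit and scroll it into view. A standalone sketch with hypothetical buffer contents:

import tkinter as tk

root = tk.Tk()
text = tk.Text(root)
text.pack()
text.insert('end', 'feature { ... }\n\nfamily Det { entry: np/n; }\n')

text.mark_set('START', '1.0')
name = 'Det'
idx = text.search('family ' + name, 'START')             # '' when not found
idx = idx or text.search("family '" + name + "'", 'START')
idx = idx or text.search('family "' + name + '"', 'START')
if idx:
    text.mark_set('insert', idx)
    text.see(idx)                    # scroll the hit into view
root.mainloop()
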
 
     # Finished editing
-    #def saveSection(self, childFrame, cfile, hiliteText, varset, canvas, mainFrame, homeButton, undoButton):
+    # def saveSection(self, childFrame, cfile, hiliteText, varset, canvas, mainFrame, homeButton, undoButton):
     def saveSection(self, childFrame, cfile, hiliteText, varset, canvas, mainFrame):
-        # We force the text contents of the cfile object to copy over
-        # all that is presently in the current text-box
-        cfile.setAllText(self.text.get(1.0,END))
+        if self.text is None:
+            return
+        # We force the text contents of the cfile object to copy over
+        # all that is presently in the current text-box
+        cfile.setAllText(self.text.get(1.0, END))
 
-        # Undo the highlight of the row
-        hiliteText.offHilite()
+        # Undo the highlight of the row
+        hiliteText.offHilite()
 
-        # Recompile whatever was edited and redisplay
-        # Note: changes are not saved hereby!!
-        cfile.compile_if_needed()
-        cfile.onLexicon()
+        # Recompile whatever was edited and redisplay.
+        # Note: this does not save the changes to disk!
+        cfile.compile_if_needed()
+        cfile.onLexicon()
 
-        # Restore the right-click binding to the original
-        self.menuHolder.bind("<Button-3>", self.editPopup)
+        # Restore the right-click binding to the original
+        self.menuHolder.bind("<Button-3>", self.editPopup)
 
     # Restore view to original place where you wanted to edit
     def editHome(self, cfile):
-        # Move the insert position to the first occurence of the family name
-        # FIXME: this is poor implementation
-        # The positioning of the insert cursor should be happening by parsing the
-        # CFG production rules, using CSFamily.prod.lineno and endlineno
-        self.text.config(takefocus=True)
-        idx= self.text.search('family '+ self.name, "START")
-
-        if not idx:
-            showwarning('Error', 'Original entry for '+self.name+ ' not found!')
-        self.text.mark_set(CURRENT, idx)
-        self.text.see(CURRENT)
+        # Move the insert position to the first occurrence of the family name.
+        # FIXME: this is a poor implementation. The insert cursor should be
+        # positioned by parsing the CFG production rules, using
+        # CSFamily.prod.lineno and endlineno.
+        self.text.config(takefocus=True)
+        idx = self.text.search('family ' + self.name, "START")
+
+        if not idx:
+            showwarning('Error', 'Original entry for ' + self.name + ' not found!')
+            return
+        self.text.mark_set(CURRENT, idx)
+        self.text.see(CURRENT)
 
     # Undo all editing done till now
     def undoEdit(self, fileData, cfile):
-        askqn = askokcancel('Warning','Undo all changes till now?')
-        if askqn:
-            self.text.delete("START", END)
-            self.text.insert(CURRENT, fileData)
-            self.editHome(cfile)
+        askqn = askokcancel('Warning', 'Undo all changes till now?')
+        if askqn:
+            self.text.delete("START", END)
+            self.text.insert(CURRENT, fileData)
+            self.editHome(cfile)
+
 
-
 # CSFamilyEntry is an `entry' statement inside a `family' block.
 #
@@ -2231,7 +2226,7 @@ class CSFamilyMember(CSStatement):
                     'member')
             first = False
 
-        print len (self.items)
+        print(len(self.items))
         cf.finish().pack(fill=BOTH, expand=YES)
         return f
 
@@ -2296,10 +2291,10 @@ family_block : FAMILY word opt_paren_ext_attr_list LBRACE family_statement_list
             xml.extend(x.xml())
         # If members have been specified ('member' statements) and there is no
         # 'closed' property, make the family closed.
-
+
         # if family_members[$2] and not property_specified('closed', xml[1]):
         #     xml[1] += [('closed', 'true')]
-
+
         # Actually, we *always* need classes closed, due to a bizarreness in
         # OpenCCG.
         if not property_specified('closed', xml[1]):
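
property_specified and putprop operate on the XML-as-nested-lists convention visible above: an element is ['tag', [(attr, value), ...], child, ...], and xml[1] is its attribute list. A hypothetical re-implementation, just to pin down the shape the hunks assume (the real helpers live elsewhere in this module):

def property_specified(name, attrs):
    # attrs is the element's [(attr, value), ...] list, e.g. xml[1]
    return any(attr == name for (attr, _value) in attrs)

def putprop(name, value, attrs):
    # replace an existing attribute in place, or append a new one
    for i, (attr, _value) in enumerate(attrs):
        if attr == name:
            attrs[i] = (name, value)
            return
    attrs.append((name, value))

elem = ['family', [('name', 'Det')]]
if not property_specified('closed', elem[1]):
    putprop('closed', 'true', elem[1])
assert elem[1] == [('name', 'Det'), ('closed', 'true')]
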
@@ -2339,7 +2334,7 @@ def init_rules():
         ('xsub', '-') : False,
         ('typeraise', '+') : [(False, True, True)],
         ('typeraise', '-') : [(True, True, True)],
-        'typechange' : [],
+        ('typechange', '') : [],
         }
 
     global rules_to_xml_mapping
@@ -2358,7 +2353,7 @@ def save_rules(cur):
 def make_rules_xml():
     xml = []
     unique = 0
-    for (key, value) in my_sorted(rules.items()):
+    for (key, value) in sorted(list(rules.items())):
         if type(key) is tuple and key[0] in rules_to_xml_mapping:
             rx = copy.deepcopy(rules_to_xml_mapping[key[0]])
             rx[1] += [('dir', key[1] == '+' and 'forward' or 'backward')]
@@ -2375,13 +2370,13 @@ def make_rules_xml():
                            + (result != True and [['result', [], result]] or []))
-        elif key == 'typechange':
+        elif key == ('typechange', ''):
             for (arg, result, lf) in value:
                 unique += 1
-                if lf:
-                    lf = [['lf', []] + lf]
-                else:
-                    lf = []
+                if lf:
+                    lf = [['lf', []] + lf]
+                else:
+                    lf = []
                 xml.append(['typechanging', [('name', 'typechange-%d' % unique)],
                             ['arg', [], arg],
                             ['result', [], result + lf]])
@@ -2399,7 +2394,7 @@ def rulesreinit():
     rules.clear()
     rules[('typeraise', '+')] = []
     rules[('typeraise', '-')] = []
-    rules['typechange'] = []
+    rules[('typechange', '')] = []
 %y
 
 ruletype : APP | COMP | XCOMP | SUB | XSUB
@@ -2424,7 +2419,7 @@ rule : NO SEMI : rulesreinit()
               rules[('typeraise', '+')] = []; rules[('typeraise', '-')] = []
     : NO TYPERAISE PLUS SEMI : rules[('typeraise', '+')] = []
    : NO TYPERAISE MINUS SEMI : rules[('typeraise', '-')] = []
-    : NO TYPECHANGE SEMI : rules['typechange'] = []
+    : NO TYPECHANGE SEMI : rules[('typechange', '')] = []
     : ruletype PLUSMINUS SEMI : \
           rules[($1, '+')] = True; rules[($1, '-')] = True
     : ruletype PLUS SEMI : rules[($1, '+')] = True
@@ -2434,9 +2429,9 @@ rule : NO SEMI : rulesreinit()
     : TYPERAISE plusminus_spec opt_dollar COLON complexcat GOESTO opt_atomcat SEMI: dotyperaise($2, $3, $5, $7)
     : TYPECHANGE COLON complexcat GOESTO complexcat SEMI: \
-          rules['typechange'] += [($3, $5, None)]
+          rules[('typechange', '')] += [($3, $5, None)]
     : TYPECHANGE COLON complexcat GOESTO complexcat COLON hybrid_logic SEMI: \
-          rules['typechange'] += [($3, $5, $7)]
+          rules[('typechange', '')] += [($3, $5, $7)]
 
 rule_list : rule_list rule
     : empty
@@ -2571,7 +2566,7 @@ class MacroLexer(StackLexer):
             if not tok:
                 raise SyntaxError("Unexpected EOF")
             return tok
-
+
     def innertoken(self):
         macrotok = self.simpletoken()
         if not macrotok or no_macro_sub or \
@@ -2625,7 +2620,7 @@ class MacroLexer(StackLexer):
                       macrotok.value)
         else:
             if super_macro_debug:
-                print "Processing macro: %s" % macrotok.value
+                print("Processing macro: %s" % macrotok.value)
             self.pushstack(macrosub(macrodef, args, self.lineno))
         return self.innertoken()
@@ -2634,7 +2629,7 @@ class MacroLexer(StackLexer):
         def newline(num=1):
             outout('\n' * num)
             outout(' ' * 2 * self.indentlevel)
-
+
         if tok.lineno and self.lineno < tok.lineno:
             if tok.lineno - self.lineno == 1:
                 newline()
@@ -2648,7 +2643,7 @@ class MacroLexer(StackLexer):
             value = str(tok.value)
             lastval = self.last_token and str(self.last_token.value)
             if value and lastval and ((isalnumund(lastval[0]) and
-                                       isalnumund(value[0]))
+                                       isalnumund(value[0]))
                                       or self.last_token.type in ('COLON', 'COMMA')):
                 outout(' ')
@@ -2668,7 +2663,7 @@ class MacroLexer(StackLexer):
           tok and tok.type != 'BOGUS_VALUE':
            pretty_output_transformed(tok)
        self.last_token = tok
-        # print "Saw token: %s" % tok
+        # print("Saw token: %s" % tok)
        return tok
 
 #############################
 # Main driver
 #
@@ -2685,10 +2680,10 @@ def init_parse_once():
 
 class parse_results: pass
 
-def 
parse_string(str): +def parse_string(string): retval = parse_results() - if str: - retval.parse = yacc.parse(str, lexer=MacroLexer(globallexer)) + if string: + retval.parse = yacc.parse(string, lexer=MacroLexer(globallexer)) else: retval.parse = [] save_global_state(retval) @@ -2705,13 +2700,13 @@ def draw_parse(parse, cfile, childFrame, vars, canvas, mainFrame): row = 0 if parse: - for x in parse: - if hasattr(x, 'draw'): - x.draw(childFrame, cfile, vars, row, canvas, mainFrame) - row += 1 - # Make the column containing the lexical entries expand as necessary - childFrame.columnconfigure(1, weight=1) - #frame.grid(column=0) + for x in parse: + if hasattr(x, 'draw'): + x.draw(childFrame, cfile, vars, row, canvas, mainFrame) + row += 1 + # Make the column containing the lexical entries expand as necessary + childFrame.columnconfigure(1, weight=1) + #frame.grid(column=0) @@ -2760,7 +2755,7 @@ def late_init_graphics(): if not late_init_graphics_done: late_init_draw_once() late_init_graphics_done = 1 - + ############################# # Main driver # @@ -2826,33 +2821,34 @@ def main(): output_files.append(x) else: output_files = [x for x in output_file_map] - + # Now actually parse the input arguments prefix = options.prefix lastfile = '-' args = global_args or ['-'] - + for arg in args: if arg == '-': if not options.quiet: errout("ccg2xml: Processing standard input\n") fil = sys.stdin + retval = parse_string(fil.read()) else: if not options.quiet: errout("ccg2xml: Processing %s\n" % arg) - fil = file(arg) - lastfile = arg - if prefix == None: - (phead, ptail) = os.path.split(arg) - (pbase, pext) = os.path.splitext(ptail) - prefix = '%s-' % pbase - retval = parse_string(fil.read()) - # print "Retval: %s\n" % retval - + with open(arg) as fil: + lastfile = arg + if prefix == None: + (phead, ptail) = os.path.split(arg) + (pbase, pext) = os.path.splitext(ptail) + prefix = '%s-' % pbase + retval = parse_string(fil.read()) + # print("Retval: %s\n" % retval) + if macro_debug: print_macros() - + # Make output directory if needed, and output files if error_count > 0: @@ -2865,7 +2861,7 @@ def main(): os.makedirs(options.dir) else: options.dir = '.' - + for x in output_files: file_info = output_file_map[x] output_xml_file(prefix, lastfile, x, file_info[0], diff --git a/src/ccg2xml/ccg_editor.py b/src/ccg2xml/ccg_editor.py index d33f200..3348ce0 100755 --- a/src/ccg2xml/ccg_editor.py +++ b/src/ccg2xml/ccg_editor.py @@ -17,21 +17,22 @@ # This code is based on PyEdit version 1.1, from Oreilly's Programming # Python, 2nd Edition, 2001, by Mark Lutz. 
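
The ccg_editor.py hunks that follow replace the removed Python 2 md5 module with hashlib for the save/compile signatures (getSignature, last_save_signature). The patch does not show getSignature itself, so the following is only an assumed sketch of its likely shape, not the file's actual code:

import hashlib

def getSignature(text):
    # hash the whole buffer; any edit changes the digest
    return hashlib.md5(text.encode('utf-8')).hexdigest()

before = getSignature('family Det { entry: np/n; }')
after = getSignature('family Det { entry: np/n; }  ')
assert before != after    # even a trailing-whitespace edit is detected
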
-from Tkinter import * # base widgets, constants -from tkFileDialog import * # standard dialogs -from tkMessageBox import * -from tkSimpleDialog import * -from tkColorChooser import askcolor -from ScrolledText import ScrolledText -from string import split, atoi -import sys, os, string, md5 +from tkinter import * # base widgets, constants +from tkinter.filedialog import * # standard dialogs +from tkinter.messagebox import * +from tkinter.simpledialog import * +from tkinter.colorchooser import askcolor +from tkinter.ttk import * +import sys +import os +import hashlib import ccg2xml import Tree import re -START = '1.0' # index of first char: row=1,col=0 -SEL_FIRST = SEL + '.first' # map sel tag to index -SEL_LAST = SEL + '.last' # same as 'sel.last' +START = '1.0' # index of first char: row=1,col=0 +SEL_FIRST = SEL + '.first' # map sel tag to index +SEL_LAST = SEL + '.last' # same as 'sel.last' FontScale = 0 # use bigger font on linux if sys.platform[:3] != 'win': # and other non-windows boxes @@ -47,26 +48,39 @@ openfiles = {} filenames = [] + +def set_ttk_styles(): + sty = Style() + sty.configure("Main.TFrame", relief=SUNKEN) + sty = Style() + sty.configure("Child.TFrame", relief=SUNKEN, border=2) + sty = Style() + sty.configure("TBSelected.TButton", relief=SUNKEN) + sty = Style() + sty.configure("TestBed.TLabel", relief=SUNKEN, border=1, + foreground='#77AA77', font=("Helvetica", FontScale+12)) + + class CTab(Frame): # Initialize this tab. Usually called from a subclass. PARENT is # the parent widget, CFILE the CFile object associated with the # top-level window, and TABNAME is the name of this tab (that tab # will be removed from the toolbar). def __init__(self, parent, cfile, tabname): - Frame.__init__(self, parent) + Frame.__init__(self, parent, style='Main.TFrame') self.parent = parent + self.tabname = tabname self.cfile = cfile - self.toolbar = None self.checkbar = None self.menubar = [ - ('File', 0, - [('Open...', 0, self.cfile.onOpen), - ('New', 0, self.cfile.onNew), - ('Save', 0, self.onSave), - ('Save As...', 5, self.onSaveAs), - ('Close', 0, self.cfile.onClose), - 'separator', - ('Quit VisCCG', 0, self.cfile.onQuit)] + ('File', 0, + [('Open...', 0, self.cfile.onOpen), + ('New', 0, self.cfile.onNew), + ('Save', 0, self.onSave), + ('Save As...', 5, self.onSaveAs), + ('Close', 0, self.cfile.onClose), + 'separator', + ('Quit VisCCG', 0, self.cfile.onQuit)] ), ('Tools', 0, [('Font List', 0, self.cfile.onFontList), @@ -74,11 +88,11 @@ def __init__(self, parent, cfile, tabname): ('Pick Fg...', 0, self.cfile.onPickFg), ('Color List', 0, self.cfile.onColorList), 'separator', - ('Info...', 0, self.cfile.onInfo), - ] - )] + ('Info...', 0, self.cfile.onInfo)] + ) + ] self.toolbar = [ - #('Display', self.cfile.onDisplay, {'side': LEFT}), + # ('Display', self.cfile.onDisplay, {'side': LEFT}), ('Edit', self.cfile.onEdit, {'side': LEFT}), ('Lexicon', self.cfile.onLexicon, {'side': LEFT}), ('Testbed', self.cfile.onTestbed, {'side': LEFT}), @@ -88,7 +102,7 @@ def __init__(self, parent, cfile, tabname): ('Quit', self.cfile.onClose, {'side': RIGHT}), ('Help', self.cfile.help, {'side': RIGHT}), ('Save', self.onSave, {'side': RIGHT}), - ] + ] # self.remove_toolbar_button(tabname) # Add MENU (a tuple corresponding to a single top-level menu item) @@ -130,145 +144,139 @@ def onSaveAs(self, forcefile=None): else: self.cfile.setFileName(file) # may be newly created self.cfile.edit_modified(NO) - self.cfile.last_save_signature = self.cfile.getSignature(text) + self.cfile.last_save_signature = 
self.cfile.getSignature(text) class CEdit(CTab): def __init__(self, parent, cfile): CTab.__init__(self, parent, cfile, 'Edit') - self.debugFrame= None - - # Add a frame here, so that debug mode can be enabled - # by embedding other objects within this frame - editFrame = Frame(self, bd=1, bg= 'white') - editFrame.pack(fill=BOTH, expand=YES, side=TOP) - - # Add a button frame, embed the button and - # link to command for the debug mode - btnFrame = Frame(editFrame, bd = 1) - btnFrame.grid (row=0, columnspan=3, sticky=NSEW) - - vldButton = Button (btnFrame, text='Validate', command = lambda: self.onValidate(editFrame, cfile)) - vldButton.pack(side=RIGHT) - - # Put the main edit window in the row below this - vbar = Scrollbar(editFrame) - hbar = Scrollbar(editFrame, orient='horizontal') - self.text = Text(editFrame, padx=5, wrap='none', undo=YES) + self.debugFrame = None + + # Add a frame here, so that debug mode can be enabled + # by embedding other objects within this frame + editFrame = Frame(self, style='Child.TFrame') + editFrame.pack(fill=BOTH, expand=YES, side=TOP) + + # Add a button frame, embed the button and + # link to command for the debug mode + btnFrame = Frame(editFrame, style='Child.TFrame') + btnFrame.grid (row=0, columnspan=3, sticky=NSEW) + + vldButton = Button (btnFrame, text='Validate', command = lambda: self.onValidate(editFrame, cfile)) + vldButton.pack(side=RIGHT) + + # Put the main edit window in the row below this + vbar = Scrollbar(editFrame) + hbar = Scrollbar(editFrame, orient='horizontal') + self.text = Text(editFrame, padx=5, wrap='none', undo=YES) vbar.grid(row=1, column=2, sticky=NS) hbar.grid(row=2, columnspan=2, sticky=EW) # pack text last self.text.grid(row=1, column=1, sticky=NSEW) # else sbars clipped - editFrame.columnconfigure(1, weight=1) - editFrame.rowconfigure(1, weight=1) + editFrame.columnconfigure(1, weight=1) + editFrame.rowconfigure(1, weight=1) - # Add a list containing line numbers - self.lineList = Text(editFrame, relief=SUNKEN, bg='white', bd=2, yscrollcommand = vbar.set, width=3) - self.lineList.grid(row=1, column=0, sticky=NS) + # Add a list containing line numbers + self.lineList = Text(editFrame, relief=SUNKEN, bg='white', bd=2, yscrollcommand = vbar.set, width=3) + self.lineList.grid(row=1, column=0, sticky=NS) self.lineList.config(font=self.cfile.fonts[0], - bg=self.cfile.colors[0]['bg'], fg=self.cfile.colors[0]['fg']) - - - # TODO: The first time the display of the line numbers - # strangely doesn't go through --- somehow cfile - # isn't initialized. However, it works properly in the display. - # Need to understand why this happens. - - try: - self.showLineNums() - except KeyError: - dummy =1 - - self.text.config(yscrollcommand=vbar.set) # call vbar.set on text move - self.text.config(xscrollcommand=hbar.set) - #vbar.config(command=text.yview) # call text.yview on scroll move - hbar.config(command=self.text.xview) # or hbar['command']=text.xview + bg=self.cfile.colors[0]['bg'], fg=self.cfile.colors[0]['fg']) - self.text.config(font=self.cfile.fonts[0], - bg=self.cfile.colors[0]['bg'], fg=self.cfile.colors[0]['fg']) + # TODO: The first time the display of the line numbers + # strangely doesn't go through --- somehow cfile + # isn't initialized. However, it works properly in the display. + # Need to understand why this happens. 
+        try:
+            self.showLineNums()
+        except KeyError:
+            pass
+
+        self.text.config(yscrollcommand=vbar.set)  # call vbar.set on text move
+        self.text.config(xscrollcommand=hbar.set)
+        #vbar.config(command=text.yview)           # call text.yview on scroll move
+        hbar.config(command=self.text.xview)       # or hbar['command']=text.xview
+
+        self.text.config(font=self.cfile.fonts[0],
+                         bg=self.cfile.colors[0]['bg'], fg=self.cfile.colors[0]['fg'])
 
-        #Setting the movement of the listbox and the text
-        #together to be controlled by the scrollbar
-        vbar.config(command=self.scrollSet)
+        # Set the movement of the listbox and the text
+        # together to be controlled by the scrollbar
+        vbar.config(command=self.scrollSet)
 
         self.add_menu('File',
                       ('Edit', 0,
-                       [('Cut', 0, self.onCut),
-                        ('Copy', 1, self.onCopy),
-                        ('Paste', 0, self.onPaste),
-                        'separator',
-                        ('Delete', 0, self.onDelete),
-                        ('Select All', 0, self.onSelectAll)]
-                       ))
+                       [('Cut', 0, self.onCut),
+                        ('Copy', 1, self.onCopy),
+                        ('Paste', 0, self.onPaste),
+                        'separator',
+                        ('Delete', 0, self.onDelete),
+                        ('Select All', 0, self.onSelectAll)]
+                       ))
         self.add_menu('Edit',
                       ('Search', 0,
-                       [('Goto...', 0, self.cfile.onGoto),
-                        ('Find...', 0, self.cfile.onFind),
-                        ('Refind', 0, self.cfile.onRefind),
-                        ('Change...', 0, self.onChange)]
-                       ))
-        #print "Will exit the init function now"
+                       [('Goto...', 0, self.cfile.onGoto),
+                        ('Find...', 0, self.cfile.onFind),
+                        ('Refind', 0, self.cfile.onRefind),
+                        ('Change...', 0, self.onChange)]
+                       ))
 
     def scrollSet(self, *args):
-        self.lineList.yview(*args)
-        self.text.yview(*args)
+        self.lineList.yview(*args)
+        self.text.yview(*args)
 
     def reinit(self):
-        self.showLineNums()
+        self.showLineNums()
         self.text.focus()
 
     def showLineNums(self):
-        #Make the list of lines editable
-        self.lineList.config(state=NORMAL)
-        textData = self.cfile.getAllText()
-        listOfLines = textData.split('\n')
-        for num in range(1,len(listOfLines)):
-            self.lineList.insert(END,"%s\n" % num)
-        #Now that we are done changing the number of lines,
-        #we reset the text to be uneditable
-        self.lineList.config(state=NORMAL)
+        # Make the list of lines editable
+        self.lineList.config(state=NORMAL)
+        textData = self.cfile.getAllText()
+        # splitlines() drops the trailing empty element that split('\n')
+        # produced, so number lines 1..len inclusive
+        listOfLines = textData.splitlines()
+        for num in range(1, len(listOfLines) + 1):
+            self.lineList.insert(END, "%s\n" % num)
+        # Now that we are done changing the number of lines,
+        # we reset the text to be uneditable
+        self.lineList.config(state=DISABLED)
 
     def onValidate(self, editFrame, cfile):
-        #showwarning(title= 'Sorry', message='Validate and debug feature coming soon!')
         # Destroy previous display of debug or error messages
         # if present
-        if self.debugFrame:
-            self.debugFrame.grid_forget()
+        if self.debugFrame:
+            self.debugFrame.grid_forget()
 
-        # Compile if file signature has changed
-        cfile.compile_if_needed()
+        # Compile if file signature has changed
+        cfile.compile_if_needed()
 
-        # Now, call the error debug routine if errors are found
-        if (ccg2xml.error_count > 0):
-            self.debugError(editFrame, cfile)
-        else:
-            showinfo(title='VisCCG: Success', message='No validation errors!')
+        # Now, call the error debug routine if errors are found
+        if (ccg2xml.error_count > 0):
+            self.debugError(editFrame, cfile)
+        else:
+            showinfo(title='VisCCG: Success', message='No validation errors!')
 
     def debugError(self, editFrame, cfile):
-        self.debugFrame = Frame(editFrame, bg='white', bd=2)
-        self.debugFrame.grid(row=3, columnspan=2, sticky=NSEW)
+        self.debugFrame = Frame(editFrame, bg='white', bd=2)
+
self.debugFrame.grid(row=3, columnspan=2, sticky=NSEW) - # Create Listbox and scrollbars - sbar = Scrollbar(self.debugFrame) - list = Listbox(self.debugFrame, relief=SUNKEN, bg='white', bd=2, yscrollcommand = sbar.set) - sbar.config(command=list.yview) - list.pack(fill=BOTH, side=LEFT, expand=YES) - sbar.pack(fill=Y, side=RIGHT) + # Create Listbox and scrollbars + sbar = Scrollbar(self.debugFrame) + list = Listbox(self.debugFrame, relief=SUNKEN, bg='white', bd=2, yscrollcommand = sbar.set) + sbar.config(command=list.yview) + list.pack(fill=BOTH, side=LEFT, expand=YES) + sbar.pack(fill=Y, side=RIGHT) - # Display each message in the log + # Display each message in the log for mesg in ccg2xml.message_log: - type = mesg[0] - lineno = mesg[1] - errwarn = mesg[2] + type = mesg[0] + lineno = mesg[1] + errwarn = mesg[2] - if lineno: - dispError = type+' at Line '+str(lineno)+': '+errwarn - else: - dispError = type+': '+errwarn + if lineno: + dispError = type+' at Line '+str(lineno)+': '+errwarn + else: + dispError = type+': '+errwarn - list.insert(END, dispError) + list.insert(END, dispError) ##################### # Edit menu commands @@ -278,8 +286,8 @@ def onCopy(self): # get text selected by mouse,etc if not self.text.tag_ranges(SEL): # save in cross-app clipboard showerror('CCG Editor', 'No text selected') else: - text = self.text.get(SEL_FIRST, SEL_LAST) - self.clipboard_clear() + text = self.text.get(SEL_FIRST, SEL_LAST) + self.clipboard_clear() self.clipboard_append(text) def onDelete(self): # delete selected text, no save @@ -291,7 +299,7 @@ def onDelete(self): # delete selected text, no save def onCut(self): if not self.text.tag_ranges(SEL): showerror('CCG Editor', 'No text selected') - else: + else: self.onCopy() # save and delete selected text self.onDelete() @@ -302,19 +310,19 @@ def onPaste(self): showerror('CCG Editor', 'Nothing to paste') return self.text.insert(INSERT, text) # add at current insert cursor - self.text.tag_remove(SEL, '1.0', END) + self.text.tag_remove(SEL, '1.0', END) self.text.tag_add(SEL, INSERT+'-%dc' % len(text), INSERT) self.text.see(INSERT) # select it, so it can be cut def onSelectAll(self): - self.text.tag_add(SEL, '1.0', END+'-1c') # select entire text + self.text.tag_add(SEL, '1.0', END+'-1c') # select entire text self.text.mark_set(INSERT, '1.0') # move insert point to top self.text.see(INSERT) # scroll to top ####################### # Search menu commands ####################### - + def onChange(self): new = Toplevel(self) Label(new, text='Find text:').grid(row=0, column=0) @@ -323,9 +331,9 @@ def onChange(self): self.change2 = Entry(new) self.change1.grid(row=0, column=1, sticky=EW) self.change2.grid(row=1, column=1, sticky=EW) - Button(new, text='Find', + Button(new, text='Find', command=self.onDoFind).grid(row=0, column=2, sticky=EW) - Button(new, text='Apply', + Button(new, text='Apply', command=self.onDoChange).grid(row=1, column=2, sticky=EW) new.columnconfigure(1, weight=1) # expandable entrys @@ -345,7 +353,7 @@ def onDoChange(self): #################################### def isEmpty(self): - return not self.getAllText() + return not self.getAllText() def getAllText(self): return self.text.get('1.0', END+'-1c') # extract text as a string @@ -353,82 +361,49 @@ def getAllText(self): def setAllText(self, text): self.text.delete('1.0', END) # store text string in widget self.text.insert(END, text) # or '1.0' - self.text.mark_set(INSERT, '1.0') # move insert point to top + self.text.mark_set(INSERT, '1.0') # move insert point to top 
self.text.see(INSERT) # scroll to top, insert set self.cfile.edit_modified(NO) def clearAllText(self): - self.text.delete('1.0', END) # clear text in widget - -# class CDisplay(CTab): -# def __init__(self, parent, cfile): -# CTab.__init__(self, parent, cfile, 'Display') - -# # Use built-in text-with-scrollbar widget -# text = ScrolledText(self) -# text.config(font=self.cfile.fonts[0], -# bg=self.cfile.colors[0]['bg'], fg=self.cfile.colors[0]['fg']) -# #text.config(font=('courier', 10, 'normal')) # use fixed-width font -# text.pack(side=TOP, fill=BOTH, expand=YES) - -# text.config(font=self.cfile.fonts[0], -# bg=self.cfile.colors[0]['bg'], fg=self.cfile.colors[0]['fg']) -# self.text = text -# -# self.add_menu('Edit', -# ('Search', 0, -# [('Goto...', 0, self.cfile.onGoto), -# ('Find...', 0, self.cfile.onFind), -# ('Refind', 0, self.cfile.onRefind), -# )) -# -# def setAllText(self, text): -# self.text.config(state=NORMAL) -# self.text.delete('1.0', END) # store text string in widget -# self.text.insert(END, text) # or '1.0' -# self.text.mark_set(INSERT, '1.0') # move insert point to top -# self.text.see(INSERT) # scroll to top, insert set -# self.text.config(state=DISABLED) - -# def reinit(self): -# self.setAllText(self.cfile.getAllText()) -# self.text.focus() + self.text.delete('1.0', END) # clear text in widget + class CWords(CTab): def __init__(self, parent, cfile): CTab.__init__(self, parent, cfile, 'Words') - self.child=None - self.wordList = None - self.cfile = cfile + self.child=None + self.wordList = None + self.cfile = cfile # Called when we switch to this mode using the toolbar at top. def reinit(self): if self.child: self.child.pack_forget() - self.child = Frame(self, background='white') + self.child = Frame(self, style='Child.TFrame') self.child.pack(expand=YES, fill=BOTH) - scrollbar = Scrollbar(self.child, orient=VERTICAL) - self.wordList = Listbox(self.child, yscrollcommand=scrollbar.set) - self.wordList.grid(row=0, column=0, sticky=N+S+E+W) + scrollbar = Scrollbar(self.child, orient=VERTICAL) + self.wordList = Listbox(self.child, yscrollcommand=scrollbar.set) + self.wordList.grid(row=0, column=0, sticky=N+S+E+W) - scrollbar.config(command= self.wordList.yview) - scrollbar.grid(row=0, column=1, sticky=N+S) + scrollbar.config(command= self.wordList.yview) + scrollbar.grid(row=0, column=1, sticky=N+S) - self.child.grid_rowconfigure(0, weight=1) - self.child.grid_columnconfigure(0, weight=1) + self.child.grid_rowconfigure(0, weight=1) + self.child.grid_columnconfigure(0, weight=1) - #If the data hasn't been compiled yet, then do so - try: - dummy = ccg2xml.morph_xml - except: - self.cfile.compile_if_needed() - #Adding dummy code for all words - for x in ccg2xml.morph_xml: - assert x[0] == 'entry' - self.wordList.insert (END, ccg2xml.getprop('word', x[1])) - #print ccg2xml.getprop('word', x[1]) + #If the data hasn't been compiled yet, then do so + try: + dummy = ccg2xml.morph_xml + except: + self.cfile.compile_if_needed() + #Adding dummy code for all words + for x in ccg2xml.morph_xml: + assert x[0] == 'entry' + self.wordList.insert (END, ccg2xml.getprop('word', x[1])) + # print(ccg2xml.getprop('word', x[1])) class CLexicon(CTab): class lexicon_vars(object): @@ -445,8 +420,8 @@ def __init__(self): def __init__(self, parent, cfile): CTab.__init__(self, parent, cfile, 'Lexicon') self.child = None - self.cnv = None - self.mainFrame = None + self.cnv = None + self.mainFrame = None self.vars = self.lexicon_vars() # FIXME? 
It's a bit awkward that ccg.ply has references to the @@ -457,74 +432,48 @@ def __init__(self, parent, cfile): ("Show features", self.vars.show_feat_struct), ('Full-form features', self.vars.show_full_features), ('Show semantics', self.vars.show_semantics), - ] + ] # Called when we switch to this mode using the toolbar at top. def reinit(self): self.redraw() - # Called when a change is made to a checkbox setting. - # FIXME: There may be a smarter way to do this. -# def redraw(self): -# self.cfile.compile_if_needed() -# if self.child: -# #self.cnv.pack_forget() -# self.child.pack_forget() -# -# self.child = Frame(self, bd=2, relief=SUNKEN, background='white') -# -# self.child.pack(expand=YES, fill=BOTH) -# -# self.child.rowconfigure(1, weight=1) -# self.child.columnconfigure(1, weight=1) -# -# ccg2xml.draw_parse(self.cfile.curparse.parse, self.cfile, self.child, -# self.vars) - def redraw(self): - self.cfile.compile_if_needed() - if self.child: - self.child.pack_forget() - if self.mainFrame: - self.mainFrame.pack_forget() - - self.mainFrame = Frame(self, bd=1, bg='white') - self.mainFrame.pack_propagate(0) - self.mainFrame.pack(expand=YES, fill=BOTH) + self.cfile.compile_if_needed() + if self.child: + self.child.pack_forget() + if self.mainFrame: + self.mainFrame.pack_forget() - self.mainFrame.grid_rowconfigure(0, weight=1) - self.mainFrame.grid_columnconfigure(0, weight=1) + self.mainFrame = Frame(self, style='Main.TFrame') + self.mainFrame.pack_propagate(0) + self.mainFrame.pack(expand=YES, fill=BOTH) - xscrollbar = Scrollbar(self.mainFrame, orient=HORIZONTAL) - xscrollbar.grid(row=1, column=0, sticky=E+W) + self.mainFrame.grid_rowconfigure(0, weight=1) + self.mainFrame.grid_columnconfigure(0, weight=1) - yscrollbar = Scrollbar(self.mainFrame) - yscrollbar.grid(row=0, column=1, sticky=N+S) + xscrollbar = Scrollbar(self.mainFrame, orient=HORIZONTAL) + xscrollbar.grid(row=1, column=0, sticky=E+W) - self.cnv= Canvas(self.mainFrame, bd=2, xscrollcommand=xscrollbar.set, - yscrollcommand=yscrollbar.set, width = 847, height=369) - #height= self.cfile.top.cget('height'), - #width = self.cfile.top.cget('width')) + yscrollbar = Scrollbar(self.mainFrame) + yscrollbar.grid(row=0, column=1, sticky=N+S) - #print self.cfile.top.cget('height') - #print self.cfile.top.cget('width') + self.cnv = Canvas(self.mainFrame, bd=2, xscrollcommand=xscrollbar.set, + yscrollcommand=yscrollbar.set, width = 847, height=369) - xscrollbar.config(command= self.cnv.xview) - yscrollbar.config(command= self.cnv.yview) + xscrollbar.config(command= self.cnv.xview) + yscrollbar.config(command= self.cnv.yview) - self.child = Frame(self.cnv, bd=2, relief=SUNKEN, background='white') - #self.child.grid_rowconfigure(0, weight=1) - #self.child.grid_columnconfigure(0, weight=1) + self.child = Frame(self.cnv, style='Child.TFrame') - self.cnv.create_window(0, 0, anchor='nw', window=self.child) + self.cnv.create_window(0, 0, anchor='nw', window=self.child) - ccg2xml.draw_parse(self.cfile.curparse.parse, self.cfile, self.child, self.vars, self.cnv, self.mainFrame) + ccg2xml.draw_parse(self.cfile.curparse.parse, self.cfile, self.child, self.vars, self.cnv, self.mainFrame) - self.child.update_idletasks() - #self.child.grid(row=0, column=0, sticky=NSEW) + self.child.update_idletasks() - self.cnv.config(scrollregion=self.cnv.bbox("all")) - self.cnv.grid(row=0, column=0, sticky='NSEW') + self.cnv.config(scrollregion=self.cnv.bbox("all")) + self.cnv.grid(row=0, column=0, sticky='NSEW') class CRules(CTab): @@ -534,106 +483,104 @@ def 
__init__(self, parent, cfile): class CFeatures(CTab): def __init__(self, parent, cfile): CTab.__init__(self, parent, cfile, 'Features') - self.child=None - self.checkbar=None - self.edit=None - self.text=None + self.child=None + self.checkbar=None + self.edit=None + self.text=None # Called when we switch to this mode using the toolbar at top. def reinit(self): if self.child: self.child.pack_forget() - self.child = Frame(self, background='white', width = 847, height = 369) + self.child = Frame(self, style='Child.TFrame', width=847, height=369) self.child.pack(expand=YES, fill=BOTH) - butframe = Frame(self.child, cursor='hand2', - relief=SUNKEN, bd=2) + butframe = Frame(self.child, cursor='hand2', style='Child.TFrame') butframe.pack(fill=X) but1 = Button(butframe, text='Expand All', command=self.expand_all) but1.pack(side=LEFT) but2 = Button(butframe, text='Contract All', command=self.contract_all) but2.pack(side=LEFT) - # Force editing in the same frame: but a lower view: - # pass self.child as the parent frame - self.edit = Button(butframe, text='Edit', command= lambda:self.edit_tree(self.child)) + # Force editing in the same frame: but a lower view: + # pass self.child as the parent frame + self.edit = Button(butframe, text='Edit', command=lambda:self.edit_tree(self.child)) self.edit.pack(side=RIGHT) - featframe = Frame(self.child, bd=2, relief=SUNKEN, - background='white') + featframe = Frame(self.child, style='Child.TFrame') featframe.pack(expand=YES, fill=BOTH) self.cfile.compile_if_needed() - # Build the tree - self.tree={} - self.root_name = re.sub(r'^(.*)\.(.*)$', r'\1', self.cfile.file) - self.tree[self.root_name]=[] - for feat in self.cfile.curparse.feature_to_values: - self.tree[self.root_name] += [str(feat)] - for feat in self.cfile.curparse.feature_to_values: - self.tree[feat] = [] - - for x in self.cfile.curparse.feature_to_values[feat]: - if x.name not in self.tree : - self.tree[x.name] = [] - - for x in self.cfile.curparse.feature_to_values[feat]: - if x.parents: - par = x.parents[0] - self.tree[par.name] += [x.name] - else: - self.tree[feat] += [x.name] - - # Define the images for opened and closed categories - shut_icon=PhotoImage(data='R0lGODlhCQAQAJH/AMDAwAAAAGnD/wAAACH5BAEAAAAALAAA' - 'AAAJABAAQAIdhI8hu2EqXIroyQrb\nyRf0VG0UxnSZ5jFjulrhaxQ' - 'AO6olVwAAOw==') - open_icon=PhotoImage(data='R0lGODlhEAAJAJH/AMDAwAAAAGnD/wAAACH5BAEAAAAALAAA' - 'AAAQAAkAQAIahI+pyyEPg3KwPrko\nTqH7/yGUJWxcZTapUQAAO8b' - 'yUgAAOw==') - - # Create the tree - self.t=Tree.Tree(master=featframe, - root_id='', - root_label=self.root_name, - collapsed_icon = shut_icon, - expanded_icon = open_icon, - get_contents_callback = self.get_treedata, - line_flag=False) - - self.t.grid(row=0, column=0, sticky = 'nsew') - - featframe.grid_rowconfigure(0, weight=1) - featframe.grid_columnconfigure(0, weight=1) - - sb=Scrollbar(featframe) - sb.grid(row=0, column=1, sticky='ns') - self.t.configure(yscrollcommand=sb.set) - sb.configure(command=self.t.yview) - - sb=Scrollbar(featframe, orient=HORIZONTAL) - sb.grid(row=1, column=0, sticky='ew') - self.t.configure(xscrollcommand=sb.set) - sb.configure(command=self.t.xview) - - # Expand the whole tree out - self.expand_tree(self.t.root) + # Build the tree + self.tree={} + self.root_name = re.sub(r'^(.*)\.(.*)$', r'\1', self.cfile.file) + self.tree[self.root_name]=[] + for feat in self.cfile.curparse.feature_to_values: + self.tree[self.root_name] += [str(feat)] + for feat in self.cfile.curparse.feature_to_values: + self.tree[feat] = [] + + for x in 
self.cfile.curparse.feature_to_values[feat]: + if x.name not in self.tree: + self.tree[x.name] = [] + + for x in self.cfile.curparse.feature_to_values[feat]: + if x.parents: + par = x.parents[0] + self.tree[par.name] += [x.name] + else: + self.tree[feat] += [x.name] + + # Define the images for opened and closed categories + shut_icon=PhotoImage(data='R0lGODlhCQAQAJH/AMDAwAAAAGnD/wAAACH5BAEAAAAALAAA' + 'AAAJABAAQAIdhI8hu2EqXIroyQrb\nyRf0VG0UxnSZ5jFjulrhaxQ' + 'AO6olVwAAOw==') + open_icon=PhotoImage(data='R0lGODlhEAAJAJH/AMDAwAAAAGnD/wAAACH5BAEAAAAALAAA' + 'AAAQAAkAQAIahI+pyyEPg3KwPrko\nTqH7/yGUJWxcZTapUQAAO8b' + 'yUgAAOw==') + + # Create the tree + self.t=Tree.Tree(master=featframe, + root_id='', + root_label=self.root_name, + collapsed_icon=shut_icon, + expanded_icon=open_icon, + get_contents_callback=self.get_treedata, + line_flag=False) + + self.t.grid(row=0, column=0, sticky = 'nsew') + + featframe.grid_rowconfigure(0, weight=1) + featframe.grid_columnconfigure(0, weight=1) + + sb=Scrollbar(featframe) + sb.grid(row=0, column=1, sticky='ns') + self.t.configure(yscrollcommand=sb.set) + sb.configure(command=self.t.yview) + + sb=Scrollbar(featframe, orient=HORIZONTAL) + sb.grid(row=1, column=0, sticky='ew') + self.t.configure(xscrollcommand=sb.set) + sb.configure(command=self.t.xview) + + # Expand the whole tree out + self.expand_tree(self.t.root) # Returns the nodes rooted at the node passed and adds them to the tree def get_treedata(self,node): - lbl = str(node.get_label()) - children = self.tree[lbl] - for x in children: - if self.tree[x]: - expands=1 - else: - expands=0 - self.t.add_node(name=x,flag=expands) + lbl = str(node.get_label()) + children = self.tree[lbl] + for x in children: + if self.tree[x]: + expands=1 + else: + expands=0 + self.t.add_node(name=x, flag=expands) # Expand the tree rooted at node recursively def expand_tree(self, node): - node.expand() - for child in node.children(): - if child.expandable(): - self.expand_tree(child) + node.expand() + for child in node.children(): + if child.expandable(): + self.expand_tree(child) def expand_all(self): self.expand_tree(self.t.root) @@ -642,7 +589,7 @@ def contract_all(self): self.t.root.collapse() def edit_tree(self, parent): - editFrame = Frame(parent, bd=1, background='white') + editFrame = Frame(parent, style='Main.TFrame') self.text = Text(editFrame, padx=5, wrap=None, undo = YES, background='white') @@ -681,11 +628,11 @@ def edit_tree(self, parent): # CFG production rules, using CSFamily.prod.lineno and endlineno self.text.config(takefocus=True) idx= self.text.search('feature', "START") - if idx: - self.text.mark_set(CURRENT, idx) - self.text.see(CURRENT) - else: - showwarning('Warning','Features not located in text') + if idx: + self.text.mark_set(CURRENT, idx) + self.text.see(CURRENT) + else: + showwarning('Warning','Features not located in text') editFrame.pack(expand=YES, fill=BOTH) @@ -699,20 +646,20 @@ def save_tree(self, parent): # Note: changes are not saved hereby!! 
self.cfile.compile_if_needed() self.cfile.onFeatures() - + class CTestbed(CTab): def __init__(self, parent, cfile): CTab.__init__(self, parent, cfile, 'Testbed') self.child = None - self.edit = None - self.text = None - self.editFrame = None - self.cnv = None - self.mainFrame = None - self.newInsert = None - - def makelab(self, text, row, col, **props): - lab = Label(self.child, text=text, background='white', **props) + self.edit = None + self.text = None + self.editFrame = None + self.cnv = None + self.mainFrame = None + self.newInsert = None + + def makelab(self, text, row, col, style_name=None, **kwargs): + lab = Label(self.child, text=text, style=(style_name or ''), **kwargs) # Make the label grow to fill all space allocated for the column lab.grid(row=row, column=col, sticky='NSEW') @@ -720,51 +667,51 @@ def makelab(self, text, row, col, **props): def reinit(self): if self.child: self.child.pack_forget() - if self.mainFrame: - self.mainFrame.pack_forget() + if self.mainFrame: + self.mainFrame.pack_forget() - self.mainFrame = Frame(self, bd=1, bg='white') - self.mainFrame.pack(expand=YES, fill=BOTH) + self.mainFrame = Frame(self, style='Main.TFrame') + self.mainFrame.pack(expand=YES, fill=BOTH) - self.mainFrame.grid_rowconfigure(0, weight=1) - self.mainFrame.grid_columnconfigure(0, weight=1) + self.mainFrame.grid_rowconfigure(0, weight=1) + self.mainFrame.grid_columnconfigure(0, weight=1) - xscrollbar = Scrollbar(self.mainFrame, orient=HORIZONTAL) - xscrollbar.grid(row=1, column=0, sticky=E+W) + xscrollbar = Scrollbar(self.mainFrame, orient=HORIZONTAL) + xscrollbar.grid(row=1, column=0, sticky=E+W) - yscrollbar = Scrollbar(self.mainFrame) - yscrollbar.grid(row=0, column=1, sticky=N+S) + yscrollbar = Scrollbar(self.mainFrame) + yscrollbar.grid(row=0, column=1, sticky=N+S) - self.cnv= Canvas(self.mainFrame, bd=2, xscrollcommand=xscrollbar.set, - yscrollcommand=yscrollbar.set, width = 847, height=369) + self.cnv= Canvas(self.mainFrame, bd=2, xscrollcommand=xscrollbar.set, + yscrollcommand=yscrollbar.set, width = 847, height=369) - xscrollbar.config(command=self.cnv.xview) - yscrollbar.config(command=self.cnv.yview) + xscrollbar.config(command=self.cnv.xview) + yscrollbar.config(command=self.cnv.yview) - self.child = Frame(self.cnv, bd=2, relief=SUNKEN, background='white') + self.child = Frame(self.cnv, style='Child.TFrame') - self.child.rowconfigure(1, weight=1) - self.child.columnconfigure(1, weight=1) + self.child.rowconfigure(1, weight=1) + self.child.columnconfigure(1, weight=1) self.child.pack(expand=YES, fill=BOTH) - butnFrame = Frame(self.child, relief=SUNKEN, bd=2) - butnFrame.grid(row=0, sticky='NSEW', columnspan=2) + butnFrame = Frame(self.child, style='Child.TFrame') + butnFrame.grid(row=0, sticky='NSEW', columnspan=2) - self.edit = Button(butnFrame, text='Edit', command= self.edit_testbed) - self.edit.pack(side=RIGHT) - self.newInsert = Button(butnFrame, text='New Sentence', command= self.new_sentence) - self.newInsert.pack(side=RIGHT) + self.edit = Button(butnFrame, text='Edit', command= self.edit_testbed) + self.edit.pack(side=RIGHT) + self.newInsert = Button(butnFrame, text='New Sentence', command= self.new_sentence) + self.newInsert.pack(side=RIGHT) self.cfile.compile_if_needed() - self.makelab("Num Parses", 1, 0, bd=1, relief=SUNKEN, fg="#77AA77", font = ("Helvetica", FontScale +12)) - self.makelab("Sentence", 1, 1, bd=1, relief=SUNKEN, fg="#77AA77", font = ("Helvetica", FontScale +12)) + self.makelab("Num Parses", 1, 0, 'TestBed.TLabel') + self.makelab("Sentence", 1, 1, 
'TestBed.TLabel') # Make the column containing the sentences grow to include all # extra space self.child.columnconfigure(1, weight=1) - for i in xrange(len(self.cfile.curparse.testbed_statements)): + for i in range(len(self.cfile.curparse.testbed_statements)): x = self.cfile.curparse.testbed_statements[i] assert x[0] == 'item' x = x[1] @@ -772,29 +719,29 @@ def reinit(self): numparse = ccg2xml.getprop('numOfParses', x) string = ccg2xml.getprop('string', x) - # How many parses of the sentence are produced? + # How many parses of the sentence are produced? self.makelab('%s' % numparse, i+2, 0) - # Print the sentence itself + # Print the sentence itself self.makelab('%s%s' % (numparse == 0 and '*' or '', string), i+2, 1, anchor=W) - self.cnv.create_window(0, 0, anchor='nw', window=self.child) + self.cnv.create_window(0, 0, anchor='nw', window=self.child) - self.child.update_idletasks() - #self.child.grid(row=0, column=0, sticky=NSEW) + self.child.update_idletasks() + #self.child.grid(row=0, column=0, sticky=NSEW) - self.cnv.config(scrollregion=self.cnv.bbox("all")) - self.cnv.grid(row=0, column=0, sticky='NSEW') + self.cnv.config(scrollregion=self.cnv.bbox("all")) + self.cnv.grid(row=0, column=0, sticky='NSEW') # Edit the testbed def edit_testbed(self): - self.editFrame = Frame(self.mainFrame, bd=1, background='white') - #self.editFrame.grid(row=len(self.cfile.curparse.testbed_statements)+3, columnspan=2, sticky='NSEW') - self.editFrame.grid(row=2, columnspan=2, sticky='NSEW') + self.editFrame = Frame(self.mainFrame, style='Main.TFrame') + #self.editFrame.grid(row=len(self.cfile.curparse.testbed_statements)+3, columnspan=2, sticky='NSEW') + self.editFrame.grid(row=2, columnspan=2, sticky='NSEW') - self.text = Text(self.editFrame, padx=5, wrap=None, undo = YES, background='white') + self.text = Text(self.editFrame, padx=5, wrap=None, undo=YES, background='white') vbar = Scrollbar(self.editFrame) hbar = Scrollbar(self.editFrame, orient='horizontal') @@ -805,12 +752,12 @@ def edit_testbed(self): # Change the text on the button, and also pass the rest # of the arguments so that the grid for the statements can be reset - self.edit.config(text='Done', command= self.save_testbed) + self.edit.config(text='Done', command=self.save_testbed) - # Changing the mode of the cfile object here, + # Changing the mode of the cfile object here, # so that once the user clicks done, # the whole object is recompiled and redisplayed - self.cfile.mode= 'Edit' + self.cfile.mode = 'Edit' vbar.pack(side=RIGHT, fill=Y) hbar.pack(side=BOTTOM, fill=X) @@ -829,24 +776,24 @@ def edit_testbed(self): # The positioning of the insert cursor should be happening by parsing the # CFG production rules, using CSFamily.prod.lineno and endlineno self.text.config(takefocus=True) - idx= self.text.search('testbed', "START") - if idx: - self.text.mark_set(CURRENT, idx) - self.text.see(CURRENT) - else: - showwarning(title= 'VisCCG: Warning', message='No initial testbed found') + idx = self.text.search('testbed', "START") + if idx: + self.text.mark_set(CURRENT, idx) + self.text.see(CURRENT) + else: + showwarning(title= 'VisCCG: Warning', message='No initial testbed found') - #self.editFrame.pack(expand=YES, fill=BOTH) - self.child.update_idletasks() - self.cnv.config(scrollregion=self.cnv.bbox("all")) + #self.editFrame.pack(expand=YES, fill=BOTH) + self.child.update_idletasks() + self.cnv.config(scrollregion=self.cnv.bbox("all")) # Save the edited text def save_testbed(self): # We force the text contents of the cfile object to copy over # 
all that is presently in the current text-box - self.cfile.setAllText(self.text.get(1.0,END)) - self.edit.config(text='Edit', command= self.edit_testbed) - self.editFrame.pack_forget() + self.cfile.setAllText(self.text.get(1.0, END)) + self.edit.config(text='Edit', command=self.edit_testbed) + self.editFrame.pack_forget() # Recompile whatever was edited and redisplay # Note: changes are not saved hereby!! @@ -855,77 +802,77 @@ def save_testbed(self): # Enter a new sentence def new_sentence(self): - master = Tk() - master.title('VisCCG: New Sentence for the testbed') - sent = Entry(master, bg='#FFFFFF', width = 100) - nParses = Entry(master, bg='#FFFFFF', width = 2) - - sLabel = Label (master, text = 'Sentence:') - nLabel = Label (master, text = 'Number of parses:') - - sent.focus_set() - - b = Button(master, text="Add sentence", width=10, command= lambda:self.editNew(master, sent, nParses)) - c = Button(master, text="Cancel", command= master.destroy) - - sent.grid (row=1, column=0, sticky = W) - nParses.grid (row=1, column=1, sticky= W) - sLabel.grid (row=0, column=0, sticky=W) - nLabel.grid (row=0, column=1, sticky = W) - b.grid (row=2, column = 0) - c.grid (row=2, column = 1) + master = Tk() + master.title('VisCCG: New Sentence for the testbed') + sent = Entry(master, width=100) + nParses = Entry(master, width=2) + + sLabel = Label(master, text='Sentence:') + nLabel = Label(master, text='Number of parses:') + + sent.focus_set() + + b = Button(master, text="Add sentence", width=10, command= lambda:self.editNew(master, sent, nParses)) + c = Button(master, text="Cancel", command= master.destroy) + + sent.grid (row=1, column=0, sticky = W) + nParses.grid (row=1, column=1, sticky= W) + sLabel.grid (row=0, column=0, sticky=W) + nLabel.grid (row=0, column=1, sticky = W) + b.grid (row=2, column = 0) + c.grid (row=2, column = 1) # Print from the new sentence def editNew(self, master, sent, nParses): # Prepare the file's contents for editing fileData = self.cfile.getAllText() - self.text = Text(master) + self.text = Text(master) self.text.mark_set("START", INSERT) self.text.mark_gravity("START", LEFT) self.text.insert(INSERT, fileData) - testSent = sent.get() - npSent = nParses.get() + testSent = sent.get() + npSent = nParses.get() self.text.config(takefocus=True) idx= self.text.search('testbed', "START") - if idx: - self.text.mark_set("START", idx) - idx= self.text.search('{', "START", forwards = True) - self.text.mark_set("START", idx) - idx= self.text.search('\n', "START", forwards = True) - # FIXME: really poor search for locating the right position - # to insert text here. Needs correction! 
- self.text.mark_set(INSERT, idx) - self.text.mark_gravity(INSERT, RIGHT) - - self.text.insert (INSERT, '\n\t'+ testSent+ ':\t'+ npSent+ ';') - - else: - showwarning(title= 'VisCCG: Warning', message='No initial testbed found, creating new') - self.text.mark_set(INSERT, END) - self.text.mark_gravity(INSERT, RIGHT) - - self.text.insert (INSERT, ' testbed {\n') - self.text.insert (INSERT, '\n\t'+ testSent+ ':\t'+ npSent+ ';') - self.text.insert (INSERT, '}\n') - - - # Set the original file's data to be this - fileData= self.text.get(1.0, END) - self.cfile.setAllText(fileData) - - # Destroy the entry window - master.destroy() - - # Update the display + if idx: + self.text.mark_set("START", idx) + idx = self.text.search('{', "START", forwards = True) + self.text.mark_set("START", idx) + idx = self.text.search('\n', "START", forwards = True) + # FIXME: really poor search for locating the right position + # to insert text here. Needs correction! + self.text.mark_set(INSERT, idx) + self.text.mark_gravity(INSERT, RIGHT) + + self.text.insert (INSERT, '\n\t'+ testSent+ ':\t'+ npSent+ ';') + + else: + showwarning(title= 'VisCCG: Warning', message='No initial testbed found, creating new') + self.text.mark_set(INSERT, END) + self.text.mark_gravity(INSERT, RIGHT) + + self.text.insert (INSERT, ' testbed {\n') + self.text.insert (INSERT, '\n\t'+ testSent+ ':\t'+ npSent+ ';') + self.text.insert (INSERT, '}\n') + + + # Set the original file's data to be this + fileData= self.text.get(1.0, END) + self.cfile.setAllText(fileData) + + # Destroy the entry window + master.destroy() + + # Update the display self.cfile.mode= 'Edit' self.cfile.compile_if_needed() self.cfile.onTestbed() # Creates the top-level window and populates the widgets below it. -class CFile(object): +class CFile: #### NOTE NOTE NOTE! Variables declared like this, in the class itself, #### are class variables (not instance variables) until they are #### assigned to. 
If you want pure instance variables, you need to @@ -960,7 +907,7 @@ class CFile(object): ('system', 10+FontScale, 'normal'), ('courier', 20+FontScale, 'normal')] - def __init__(self, file=None): + def __init__(self, file=None, parent=None): self.file = file self.openDialog = None @@ -968,16 +915,10 @@ def __init__(self, file=None): self.lastfind = None self.current_parse = None self.mode = None - self.last_save_signature = None - self.last_compile_signature = None - - # First top-level window is Tk(); rest are Toplevel() - global root - if not root: - root = Tk() - self.top = root - else: - self.top = Toplevel(root) + self.last_save_signature = None + self.last_compile_signature = None + + self.top = parent or Toplevel(root) ccg2xml.late_init_graphics() openfiles[self] = True @@ -997,18 +938,17 @@ def __init__(self, file=None): #self.switch_to('Edit') self.setFileName(None) if file: - self.onFirstOpen(file) - else: - # When the user has just opened a new file - # Need to load template from the src folder - openccg_home = os.environ['OPENCCG_HOME'] - template = open(openccg_home + '/src/ccg2xml/grammar_template.ccg', 'r').read() - self.setAllText(template) - - # Save the MD5 signature for future comparison - self.last_save_signature = self.getSignature(self.getAllText()) - #print "The modes should have been initialized now" - self.switch_to('Edit') + self.onFirstOpen(file) + else: + # When the user has just opened a new file + # Need to load template from the src folder + openccg_home = os.environ['OPENCCG_HOME'] + template = open(openccg_home + '/src/ccg2xml/grammar_template.ccg', 'r').read() + self.setAllText(template) + + # Save the MD5 signature for future comparison + self.last_save_signature = self.getSignature(self.getAllText()) + self.switch_to('Edit') def switch_to(self, mode): # Switch to a different mode (display, edit, test). Remove the @@ -1030,7 +970,7 @@ def switch_to(self, mode): self.makeMenubar() self.makeToolbar(mode) self.makeCheckbar() - #print "Reinit being called now... " + # print("Reinit being called now...") self.main.reinit() # Pack the main widget after the toolbar, so it goes below it. 
self.main.pack(side=TOP, expand=YES, fill=BOTH) @@ -1067,7 +1007,7 @@ def addMenuItems(self, menu, items): for num in item: menu.entryconfig(num, state=DISABLED) elif type(item[2]) is not list: - menu.add_command(label = item[0], # command: + menu.add_command(label = item[0], # command: underline = item[1], # add command command = item[2]) # cmd=callable else: @@ -1075,7 +1015,7 @@ def addMenuItems(self, menu, items): self.addMenuItems(pullover, item[2]) # sublist: menu.add_cascade(label = item[0], # make submenu underline = item[1], # add cascade - menu = pullover) + menu = pullover) def makeToolbar(self, selected): """ @@ -1084,13 +1024,13 @@ def makeToolbar(self, selected): """ if self.main.toolbar: self.toolbar_widget = Frame(self.outer, cursor='hand2', - relief=SUNKEN, bd=2) + style='Child.TFrame') self.toolbar_widget.pack(side=TOP, fill=X) for (name, action, where) in self.main.toolbar: but = Button(self.toolbar_widget, text=name, command=action) if name == selected: - but.config(relief=SUNKEN) + but.config(style='TBSelected.TButton') but.pack(where) def makeCheckbar(self): @@ -1100,7 +1040,7 @@ def makeCheckbar(self): """ if self.main.checkbar: self.checkbar_widget = Frame(self.outer, cursor='hand2', - relief=SUNKEN, bd=2) + style='Child.TFrame') self.checkbar_widget.pack(side=TOP, fill=X) for (name, var) in self.main.checkbar: Checkbutton(self.checkbar_widget, text=name, @@ -1125,16 +1065,16 @@ def _getints(self, string): def edit(self, *args): """Internal method - + This method controls the undo mechanism and the modified flag. The exact behavior of the command depends on the option argument that follows the edit argument. The following forms of the command are currently supported: - + edit_modified, edit_redo, edit_reset, edit_separator and edit_undo - + """ textwid = self.modes['Edit'].text return self._getints( @@ -1150,20 +1090,20 @@ def edit_modified(self, arg=None): modified flag of the widget to arg. 
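+        With no argument, the modified test compares the buffer's current
+        MD5 signature against the one recorded at the last save, rather
+        than relying solely on Tk's built-in modified flag.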
""" - # Added to use md5 functionality to watch for changed data - if (arg == None): - alltext = self.getAllText() - if (self.last_save_signature != self.getSignature(alltext)): - return YES - return self.edit("modified", arg) + # Added to use md5 functionality to watch for changed data + if arg is None: + alltext = self.getAllText() + if (self.last_save_signature != self.getSignature(alltext)): + return YES + return self.edit("modified", arg) def onInfo(self): text = self.getAllText() # added on 5/3/00 in 15 mins - bytes = len(text) # words uses a simple guess: - lines = len(string.split(text, '\n')) # any separated by whitespace - words = len(string.split(text)) + bytes = len(text) # words uses a simple guess: + lines = len(text.splitlines()) # any separated by whitespace + words = len(text.split()) index = self.main.text.index(INSERT) - where = tuple(string.split(index, '.')) + where = tuple(index.split('.')) showinfo('CCG Editor Information', 'Current location:\n\n' + @@ -1176,15 +1116,15 @@ def onInfo(self): ####################### # Search menu commands ####################### - + def onGoto(self, line=None): if not line: - line = askinteger('CCG Editor', 'Enter line number') - self.main.text.update() + line = askinteger('CCG Editor', 'Enter line number') + self.main.text.update() self.main.text.focus() if line is not None: maxindex = self.main.text.index(END+'-1c') - maxline = atoi(split(maxindex, '.')[0]) + maxline = int(maxindex.split('.')[0]) if line > 0 and line <= maxline: self.main.text.mark_set(INSERT, '%d.0' % line) # goto line self.main.text.tag_remove(SEL, '1.0', END) # delete selects @@ -1205,7 +1145,7 @@ def onFind(self, lastkey=None): else: pastkey = where + '+%dc' % len(key) # index past key self.main.text.tag_remove(SEL, '1.0', END) # remove any sel - self.main.text.tag_add(SEL, where, pastkey) # select key + self.main.text.tag_add(SEL, where, pastkey) # select key self.main.text.mark_set(INSERT, pastkey) # for next find self.main.text.see(where) # scroll display @@ -1213,87 +1153,57 @@ def onRefind(self): self.onFind(self.lastfind) ###################### - # Tools menu commands + # Tools menu commands ###################### def onFontList(self): self.fonts.append(self.fonts[0]) # pick next font in list del self.fonts[0] # resizes the text area - self.modes['Edit'].text.config(font=self.fonts[0]) - self.modes['Display'].text.config(font=self.fonts[0]) + self.modes['Edit'].text.config(font=self.fonts[0]) + self.modes['Display'].text.config(font=self.fonts[0]) def onColorList(self): self.colors.append(self.colors[0]) # pick next color in list del self.colors[0] # move current to end - self.modes['Edit'].text.config(fg=self.colors[0]['fg'], bg=self.colors[0]['bg']) - self.modes['Display'].text.config(fg=self.colors[0]['fg'], bg=self.colors[0]['bg']) + self.modes['Edit'].text.config(fg=self.colors[0]['fg'], bg=self.colors[0]['bg']) + self.modes['Display'].text.config(fg=self.colors[0]['fg'], bg=self.colors[0]['bg']) - def onPickFg(self): + def onPickFg(self): self.pickColor('fg') def onPickBg(self): self.pickColor('bg') def pickColor(self, part): (triple, hexstr) = askcolor() if hexstr: - apply(self.modes['Edit'].text.config, (), {part: hexstr}) - apply(self.modes['Display'].text.config, (), {part: hexstr}) - -# def onRunCode(self, parallelmode=1): -# """ -# run Python code being edited--not an ide, but handy; -# tries to run in file's dir, not cwd (may be pp2e root); -# inputs and adds command-line arguments for script files; -# code's stdin/out/err = editor's start 
window, if any; -# but parallelmode uses start to open a dos box for i/o; -# """ -# from PP2E.launchmodes import System, Start, Fork -# filemode = 0 -# thefile = str(self.getFileName()) -# cmdargs = askstring('CCG Editor', 'Commandline arguments?') or '' -# if os.path.exists(thefile): -# filemode = askyesno('CCG Editor', 'Run from file?') -# if not filemode: # run text string -# namespace = {'__name__': '__main__'} # run as top-level -# sys.argv = [thefile] + string.split(cmdargs) # could use threads -# exec self.getAllText() + '\n' in namespace # exceptions ignored -# elif askyesno('CCG Editor', 'Text saved in file?'): -# mycwd = os.getcwd() # cwd may be root -# os.chdir(os.path.dirname(thefile) or mycwd) # cd for filenames -# thecmd = thefile + ' ' + cmdargs -# if not parallelmode: # run as file -# System(thecmd, thecmd)() # block editor -# else: -# if sys.platform[:3] == 'win': # spawn in parallel -# Start(thecmd, thecmd)() # or use os.spawnv -# else: -# Fork(thecmd, thecmd)() # spawn in parallel -# os.chdir(mycwd) + self.modes['Edit'].text.config(*(), **{part: hexstr}) + self.modes['Display'].text.config(*(), **{part: hexstr}) + ##################### # File menu commands ##################### def getSignature(self, contents): - return md5.md5(contents).digest() + return hashlib.md5(contents.encode('utf-8')).digest() def my_askopenfilename(self): # objects remember last result dir/file if not self.openDialog: - self.openDialog = Open(initialdir=self.startfiledir, - filetypes=self.ftypes) + self.openDialog = Open(initialdir=self.startfiledir, + filetypes=self.ftypes) return self.openDialog.show() def my_asksaveasfilename(self): # objects remember last result dir/file if not self.saveDialog: - self.saveDialog = SaveAs(initialdir=self.startfiledir, - filetypes=self.ftypes) - self.last_save_signature = self.getSignature(self.getAllText()) + self.saveDialog = SaveAs(initialdir=self.startfiledir, + filetypes=self.ftypes) + self.last_save_signature = self.getSignature(self.getAllText()) return self.saveDialog.show() - + def onOpen(self): file = self.my_askopenfilename() # FIXME! Only create new window if file exists and is readable if file: - CFile(file) + CFile(file, parent=self.top) def onFirstOpen(self, file): try: @@ -1306,16 +1216,16 @@ def onFirstOpen(self, file): def compile_if_needed(self): # Compare the last compiled MD5 signature and present one - # and compile if needed. - # To force compilation, set this signature to None + # and compile if needed. 
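+        # Hashing the whole buffer with MD5 is cheap next to a reparse, so
+        # the signature comparison below is meant to recompile only when the
+        # text has actually changed.
+        # FIXME: the guard reads self.last_compile_signature, but the compile
+        # branch assigns self.last_compiled_signature, so the signature is
+        # never cached and every call recompiles; the two attribute names
+        # should be unified.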
+ # To force compilation, set this signature to None text = self.getAllText() - textSign = self.getSignature(text) + textSign = self.getSignature(text) if textSign != self.last_compile_signature: - # Now compile - ccg2xml.init_global_state(errors_to_string=True) - ccg2xml.options.quiet = True - self.curparse = ccg2xml.parse_string(text) - self.last_compiled_signature = textSign + # Now compile + ccg2xml.init_global_state(errors_to_string=True) + ccg2xml.options.quiet = True + self.curparse = ccg2xml.parse_string(text) + self.last_compiled_signature = textSign def onDisplay(self): self.switch_to('Display') @@ -1338,11 +1248,8 @@ def onWords(self): def onFeatures(self): self.switch_to('Features') - def onWords(self): - self.switch_to('Words') - def onNew(self): - CFile() + CFile(parent=self.top) def getFileName(self): return self.currfile @@ -1369,7 +1276,7 @@ def onClose(self): ccg2xml.debug("fooooo\n") del openfiles[self] self.top.destroy() - + def onQuit(self): modfiles = False for f in openfiles: @@ -1390,7 +1297,9 @@ def main(): else: fname = None - CFile(fname) + app_root = Tk() + set_ttk_styles() + CFile(fname, parent=app_root) mainloop() if __name__ == '__main__': # when run as a script diff --git a/src/ccg2xml/convert-ply.py b/src/ccg2xml/convert-ply.py index 2e543b0..e894d0f 100755 --- a/src/ccg2xml/convert-ply.py +++ b/src/ccg2xml/convert-ply.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/python3 import sys import re @@ -94,7 +94,7 @@ def syntax_error(err, line): maxerr) sys.stderr.write("%s in file %s, line %d: %s\n" % (err, current_file, current_lineno, line)) - + wordrange = r'\-a-zA-Z0-9_%' operrange = r'\+\*\|\?' @@ -203,17 +203,16 @@ def output_python_cfg_rule(fil, lhs, rhs, code): output_python_cfg_rule(fil, lhs, leftrhs + rightrhs, code) else: unique_no += 1 - print >> fil, "def p_%s_%d(p):" % (make_name_python_safe(lhs), - unique_no) + print("def p_%s_%d(p):" % (make_name_python_safe(lhs), unique_no), file=fil) rhs = rhs.strip() rhs = re.sub(r'\s*\|\s*', r'\n | ', rhs) rhs = re.sub(r'\n\s*\n', '\n', rhs) if rhs.find('\n') >= 0: - print >> fil, " '''%s : %s'''" % (lhs, rhs) + print(" '''%s : %s'''" % (lhs, rhs), file=fil) else: - print >> fil, " '%s : %s'" % (lhs, rhs) + print(" '%s : %s'" % (lhs, rhs), file=fil) code = replace_dollar_signs(code) - print >> fil, code + print(code, file=fil) def output_default_python_cfg_rule(fil, lhs, rhs): output_python_cfg_rule(fil, lhs, rhs, " $$ = $1\n") @@ -267,7 +266,7 @@ def clear_rule_context(): mode = 'python' contline = None - print >> outfil, """#!/usr/bin/python + print("""#!/usr/bin/python ################## NOTE NOTE NOTE ################## # @@ -282,7 +281,7 @@ def clear_rule_context(): """ % (outarg, current_file, sys.argv[0], time.asctime(), current_file, sys.argv[0], options.outfile and " -o %s" % options.outfile or "", - current_file) + current_file), file=outfil) global current_lineno current_lineno = 0 @@ -301,7 +300,7 @@ def clear_rule_context(): mode = 'lex' else: if mode == 'python': - print >> outfil, line + print(line, file=outfil) else: if yacc_python_mode: if re.match(r'\S', line): @@ -310,13 +309,13 @@ def clear_rule_context(): yacc_python_code += line + '\n' continue if re.match(r'\s*#.*$', line): - print >> outfil, line + print(line, file=outfil) continue elif line and line[-1] == '\\': contline = line[0:-1] continue elif re.match(r'\s*$', line): - print >> outfil, line + print(line, file=outfil) continue # Eliminate comments, but conservatively, to avoid any # possibility of removing comments inside of 
quotes (which diff --git a/src/ccg2xml/lex.py b/src/ccg2xml/lex.py index 85a152a..7b573ed 100644 --- a/src/ccg2xml/lex.py +++ b/src/ccg2xml/lex.py @@ -1,288 +1,314 @@ -#----------------------------------------------------------------------------- +# ----------------------------------------------------------------------------- # ply: lex.py # -# Author: David M. Beazley (dave@dabeaz.com) -# -# Copyright (C) 2001-2005, David M. Beazley -# -# $Header: /cvsroot/openccg/openccg/src/ccg2xml/lex.py,v 1.1 2006/09/30 08:11:29 benwing Exp $ -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -# -# See the file COPYING for a complete copy of the LGPL. -# -# -# This module automatically constructs a lexical analysis module from regular -# expression rules defined in a user-defined module. The idea is essentially the same -# as that used in John Aycock's Spark framework, but the implementation works -# at the module level rather than requiring the use of classes. -# -# This module tries to provide an interface that is closely modeled after -# the traditional lex interface in Unix. It also differs from Spark -# in that: -# -# - It provides more extensive error checking and reporting if -# the user supplies a set of regular expressions that can't -# be compiled or if there is any other kind of a problem in -# the specification. +# Copyright (C) 2001-2018 +# David M. Beazley (Dabeaz LLC) +# All rights reserved. # -# - The interface is geared towards LALR(1) and LR(1) parser -# generators. That is tokens are generated one at a time -# rather than being generated in advanced all in one step. +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: # -# There are a few limitations of this module +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the David Beazley or Dabeaz LLC may be used to +# endorse or promote products derived from this software without +# specific prior written permission. # -# - The module interface makes it somewhat awkward to support more -# than one lexer at a time. Although somewhat inelegant from a -# design perspective, this is rarely a practical concern for -# most compiler projects. -# -# - The lexer requires that the entire input text be read into -# a string before scanning. I suppose that most machines have -# enough memory to make this a minor issues, but it makes -# the lexer somewhat difficult to use in interactive sessions -# or with streaming data. 
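For orientation while reading the rewrite below: the header being removed here documented the PLY 1.x module-level API. Under the 3.x API that this file now tracks, a minimal lexer looks roughly like this (the token set and input string are illustrative, not part of the patch):

    import lex

    tokens = ('NUMBER', 'PLUS')

    t_PLUS = r'\+'
    t_ignore = ' \t'

    def t_NUMBER(t):
        r'\d+'
        t.value = int(t.value)
        return t

    def t_error(t):
        print("Illegal character %r" % t.value[0])
        t.lexer.skip(1)   # skip() now lives on the lexer, not the token

    lexer = lex.lex()
    lexer.input('1 + 2')
    for tok in lexer:     # Lexer objects are now iterable
        print(tok.type, tok.value, tok.lineno, tok.lexpos)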
-# -#----------------------------------------------------------------------------- - -r""" -lex.py - -This module builds lex-like scanners based on regular expression rules. -To use the module, simply write a collection of regular expression rules -and actions like this: - -# lexer.py -import lex - -# Define a list of valid tokens -tokens = ( - 'IDENTIFIER', 'NUMBER', 'PLUS', 'MINUS' - ) - -# Define tokens as functions -def t_IDENTIFIER(t): - r' ([a-zA-Z_](\w|_)* ' - return t - -def t_NUMBER(t): - r' \d+ ' - return t - -# Some simple tokens with no actions -t_PLUS = r'\+' -t_MINUS = r'-' - -# Initialize the lexer -lex.lex() - -The tokens list is required and contains a complete list of all valid -token types that the lexer is allowed to produce. Token types are -restricted to be valid identifiers. This means that 'MINUS' is a valid -token type whereas '-' is not. - -Rules are defined by writing a function with a name of the form -t_rulename. Each rule must accept a single argument which is -a token object generated by the lexer. This token has the following -attributes: - - t.type = type string of the token. This is initially set to the - name of the rule without the leading t_ - t.value = The value of the lexeme. - t.lineno = The value of the line number where the token was encountered - -For example, the t_NUMBER() rule above might be called with the following: - - t.type = 'NUMBER' - t.value = '42' - t.lineno = 3 - -Each rule returns the token object it would like to supply to the -parser. In most cases, the token t is returned with few, if any -modifications. To discard a token for things like whitespace or -comments, simply return nothing. For instance: - -def t_whitespace(t): - r' \s+ ' - pass - -For faster lexing, you can also define this in terms of the ignore set like this: - -t_ignore = ' \t' - -The characters in this string are ignored by the lexer. Use of this feature can speed -up parsing significantly since scanning will immediately proceed to the next token. - -lex requires that the token returned by each rule has an attribute -t.type. Other than this, rules are free to return any kind of token -object that they wish and may construct a new type of token object -from the attributes of t (provided the new object has the required -type attribute). - -If illegal characters are encountered, the scanner executes the -function t_error(t) where t is a token representing the rest of the -string that hasn't been matched. If this function isn't defined, a -LexError exception is raised. The .text attribute of this exception -object contains the part of the string that wasn't matched. - -The t.skip(n) method can be used to skip ahead n characters in the -input stream. This is usually only used in the error handling rule. -For instance, the following rule would print an error message and -continue: - -def t_error(t): - print "Illegal character in input %s" % t.value[0] - t.skip(1) - -Of course, a nice scanner might wish to skip more than one character -if the input looks very corrupted. - -The lex module defines a t.lineno attribute on each token that can be used -to track the current line number in the input. The value of this -variable is not modified by lex so it is up to your lexer module -to correctly update its value depending on the lexical properties -of the input language. 
To do this, you might write rules such as -the following: - -def t_newline(t): - r' \n+ ' - t.lineno += t.value.count("\n") - -To initialize your lexer so that it can be used, simply call the lex.lex() -function in your rule file. If there are any errors in your -specification, warning messages or an exception will be generated to -alert you to the problem. - -(dave: this needs to be rewritten) -To use the newly constructed lexer from another module, simply do -this: - - import lex - import lexer - plex.input("position = initial + rate*60") - - while 1: - token = plex.token() # Get a token - if not token: break # No more tokens - ... do whatever ... - -Assuming that the module 'lexer' has initialized plex as shown -above, parsing modules can safely import 'plex' without having -to import the rule file or any additional imformation about the -scanner you have defined. -""" - +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ----------------------------------------------------------------------------- - -__version__ = "1.6" - -import re, types, sys, copy - -# Exception thrown when invalid token encountered and no default +__version__ = '3.11' +__tabversion__ = '3.10' + +import re +import sys +import types +import copy +import os +import inspect + +# This tuple contains known string types +try: + # Python 2.6 + StringTypes = (types.StringType, types.UnicodeType) +except AttributeError: + # Python 3.0 + StringTypes = (str, bytes) + +# This regular expression is used to match valid token names +_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$') + +# Exception thrown when invalid token encountered and no default error +# handler is defined. class LexError(Exception): - def __init__(self,message,s): - self.args = (message,) - self.text = s + def __init__(self, message, s): + self.args = (message,) + self.text = s -# Token class -class LexToken: + +# Token class. This class is used to represent the tokens produced. +class LexToken(object): def __str__(self): - return "LexToken(%s,%r,%d)" % (self.type,self.value,self.lineno) + return 'LexToken(%s,%r,%d,%d)' % (self.type, self.value, self.lineno, self.lexpos) + def __repr__(self): return str(self) - def skip(self,n): - try: - self._skipn += n - except AttributeError: - self._skipn = n + + +# This object is a stand-in for a logging object created by the +# logging module. + +class PlyLogger(object): + def __init__(self, f): + self.f = f + + def critical(self, msg, *args, **kwargs): + self.f.write((msg % args) + '\n') + + def warning(self, msg, *args, **kwargs): + self.f.write('WARNING: ' + (msg % args) + '\n') + + def error(self, msg, *args, **kwargs): + self.f.write('ERROR: ' + (msg % args) + '\n') + + info = critical + debug = critical + + +# Null logger is used when no output is generated. Does nothing. 
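+# Because __getattribute__ and __call__ both return self, arbitrary chains of
+# attribute lookups and calls on a NullLogger silently succeed.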
+class NullLogger(object): + def __getattribute__(self, name): + return self + + def __call__(self, *args, **kwargs): + return self + # ----------------------------------------------------------------------------- -# Lexer class +# === Lexing Engine === +# +# The following Lexer class implements the lexer runtime. There are only +# a few public methods and attributes: # # input() - Store a new string in the lexer # token() - Get the next token +# clone() - Clone the lexer +# +# lineno - Current line number +# lexpos - Current position in the input string # ----------------------------------------------------------------------------- class Lexer: def __init__(self): - self.lexre = None # Master regular expression - self.lexdata = None # Actual input data (as a string) - self.lexpos = 0 # Current position in input text - self.lexlen = 0 # Length of the input text - self.lexindexfunc = [ ] # Reverse mapping of groups to functions and types - self.lexerrorf = None # Error rule (if any) - self.lextokens = None # List of valid tokens - self.lexignore = None # Ignored characters - self.lineno = 1 # Current line number - self.debug = 0 # Debugging mode - self.optimize = 0 # Optimized mode - self.token = self.errtoken - - def __copy__(self): - c = Lexer() - c.lexre = self.lexre - c.lexdata = self.lexdata - c.lexpos = self.lexpos - c.lexlen = self.lexlen - c.lexindexfunc = self.lexindexfunc - c.lexerrorf = self.lexerrorf - c.lextokens = self.lextokens - c.lexignore = self.lexignore - c.debug = self.debug - c.lineno = self.lineno - c.optimize = self.optimize - c.token = c.realtoken - return c + self.lexre = None # Master regular expression. This is a list of + # tuples (re, findex) where re is a compiled + # regular expression and findex is a list + # mapping regex group numbers to rules + self.lexretext = None # Current regular expression strings + self.lexstatere = {} # Dictionary mapping lexer states to master regexs + self.lexstateretext = {} # Dictionary mapping lexer states to regex strings + self.lexstaterenames = {} # Dictionary mapping lexer states to symbol names + self.lexstate = 'INITIAL' # Current lexer state + self.lexstatestack = [] # Stack of lexer states + self.lexstateinfo = None # State information + self.lexstateignore = {} # Dictionary of ignored characters for each state + self.lexstateerrorf = {} # Dictionary of error functions for each state + self.lexstateeoff = {} # Dictionary of eof functions for each state + self.lexreflags = 0 # Optional re compile flags + self.lexdata = None # Actual input data (as a string) + self.lexpos = 0 # Current position in input text + self.lexlen = 0 # Length of the input text + self.lexerrorf = None # Error rule (if any) + self.lexeoff = None # EOF rule (if any) + self.lextokens = None # List of valid tokens + self.lexignore = '' # Ignored characters + self.lexliterals = '' # Literal characters that can be passed through + self.lexmodule = None # Module + self.lineno = 1 # Current line number + self.lexoptimize = False # Optimized mode + + def clone(self, object=None): + c = copy.copy(self) + + # If the object parameter has been supplied, it means we are attaching the + # lexer to a new object. In this case, we have to rebind all methods in + # the lexstatere and lexstateerrorf tables. 
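+        # (Typical uses: clone() with no argument yields an independent copy
+        # sharing the compiled tables; clone(obj) rebinds rules defined as
+        # methods onto the new instance obj.)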
+ + if object: + newtab = {} + for key, ritem in self.lexstatere.items(): + newre = [] + for cre, findex in ritem: + newfindex = [] + for f in findex: + if not f or not f[0]: + newfindex.append(f) + continue + newfindex.append((getattr(object, f[0].__name__), f[1])) + newre.append((cre, newfindex)) + newtab[key] = newre + c.lexstatere = newtab + c.lexstateerrorf = {} + for key, ef in self.lexstateerrorf.items(): + c.lexstateerrorf[key] = getattr(object, ef.__name__) + c.lexmodule = object + return c + + # ------------------------------------------------------------ + # writetab() - Write lexer information to a table file + # ------------------------------------------------------------ + def writetab(self, lextab, outputdir=''): + if isinstance(lextab, types.ModuleType): + raise IOError("Won't overwrite existing lextab module") + basetabmodule = lextab.split('.')[-1] + filename = os.path.join(outputdir, basetabmodule) + '.py' + with open(filename, 'w') as tf: + tf.write('# %s.py. This file automatically created by PLY (version %s). Don\'t edit!\n' % (basetabmodule, __version__)) + tf.write('_tabversion = %s\n' % repr(__tabversion__)) + tf.write('_lextokens = set(%s)\n' % repr(tuple(sorted(self.lextokens)))) + tf.write('_lexreflags = %s\n' % repr(int(self.lexreflags))) + tf.write('_lexliterals = %s\n' % repr(self.lexliterals)) + tf.write('_lexstateinfo = %s\n' % repr(self.lexstateinfo)) + + # Rewrite the lexstatere table, replacing function objects with function names + tabre = {} + for statename, lre in self.lexstatere.items(): + titem = [] + for (pat, func), retext, renames in zip(lre, self.lexstateretext[statename], self.lexstaterenames[statename]): + titem.append((retext, _funcs_to_names(func, renames))) + tabre[statename] = titem + + tf.write('_lexstatere = %s\n' % repr(tabre)) + tf.write('_lexstateignore = %s\n' % repr(self.lexstateignore)) + + taberr = {} + for statename, ef in self.lexstateerrorf.items(): + taberr[statename] = ef.__name__ if ef else None + tf.write('_lexstateerrorf = %s\n' % repr(taberr)) + + tabeof = {} + for statename, ef in self.lexstateeoff.items(): + tabeof[statename] = ef.__name__ if ef else None + tf.write('_lexstateeoff = %s\n' % repr(tabeof)) + + # ------------------------------------------------------------ + # readtab() - Read lexer information from a tab file + # ------------------------------------------------------------ + def readtab(self, tabfile, fdict): + if isinstance(tabfile, types.ModuleType): + lextab = tabfile + else: + exec('import %s' % tabfile) + lextab = sys.modules[tabfile] + + if getattr(lextab, '_tabversion', '0.0') != __tabversion__: + raise ImportError('Inconsistent PLY version') + + self.lextokens = lextab._lextokens + self.lexreflags = lextab._lexreflags + self.lexliterals = lextab._lexliterals + self.lextokens_all = self.lextokens | set(self.lexliterals) + self.lexstateinfo = lextab._lexstateinfo + self.lexstateignore = lextab._lexstateignore + self.lexstatere = {} + self.lexstateretext = {} + for statename, lre in lextab._lexstatere.items(): + titem = [] + txtitem = [] + for pat, func_name in lre: + titem.append((re.compile(pat, lextab._lexreflags), _names_to_funcs(func_name, fdict))) + + self.lexstatere[statename] = titem + self.lexstateretext[statename] = txtitem + + self.lexstateerrorf = {} + for statename, ef in lextab._lexstateerrorf.items(): + self.lexstateerrorf[statename] = fdict[ef] + + self.lexstateeoff = {} + for statename, ef in lextab._lexstateeoff.items(): + self.lexstateeoff[statename] = fdict[ef] + + 
self.begin('INITIAL') # ------------------------------------------------------------ # input() - Push a new string into the lexer # ------------------------------------------------------------ - def input(self,s): - if not isinstance(s,types.StringType): - raise ValueError, "Expected a string" + def input(self, s): + # Pull off the first character to see if s looks like a string + c = s[:1] + if not isinstance(c, StringTypes): + raise ValueError('Expected a string') self.lexdata = s self.lexpos = 0 self.lexlen = len(s) - self.token = self.realtoken - - # Change the token routine to point to realtoken() - global token - if token == self.errtoken: - token = self.token # ------------------------------------------------------------ - # errtoken() - Return error if token is called with no data + # begin() - Changes the lexing state + # ------------------------------------------------------------ + def begin(self, state): + if state not in self.lexstatere: + raise ValueError('Undefined state') + self.lexre = self.lexstatere[state] + self.lexretext = self.lexstateretext[state] + self.lexignore = self.lexstateignore.get(state, '') + self.lexerrorf = self.lexstateerrorf.get(state, None) + self.lexeoff = self.lexstateeoff.get(state, None) + self.lexstate = state + # ------------------------------------------------------------ - def errtoken(self): - raise RuntimeError, "No input string given with input()" - + # push_state() - Changes the lexing state and saves old on stack # ------------------------------------------------------------ - # token() - Return the next token from the Lexer + def push_state(self, state): + self.lexstatestack.append(self.lexstate) + self.begin(state) + + # ------------------------------------------------------------ + # pop_state() - Restores the previous state + # ------------------------------------------------------------ + def pop_state(self): + self.begin(self.lexstatestack.pop()) + + # ------------------------------------------------------------ + # current_state() - Returns the current lexing state + # ------------------------------------------------------------ + def current_state(self): + return self.lexstate + + # ------------------------------------------------------------ + # skip() - Skip ahead n characters + # ------------------------------------------------------------ + def skip(self, n): + self.lexpos += n + + # ------------------------------------------------------------ + # opttoken() - Return the next token from the Lexer # # Note: This function has been carefully implemented to be as fast # as possible. 
Don't make changes unless you really know what # you are doing # ------------------------------------------------------------ - def realtoken(self): + def token(self): # Make local copies of frequently referenced attributes lexpos = self.lexpos lexlen = self.lexlen lexignore = self.lexignore lexdata = self.lexdata - + while lexpos < lexlen: # This code provides some short-circuit code for whitespace, tabs, and other ignored characters if lexdata[lexpos] in lexignore: @@ -290,386 +316,742 @@ def realtoken(self): continue # Look for a regular expression match - m = self.lexre.match(lexdata,lexpos) - if m: - i = m.lastindex - lexpos = m.end() + for lexre, lexindexfunc in self.lexre: + m = lexre.match(lexdata, lexpos) + if not m: + continue + + # Create a token for return tok = LexToken() tok.value = m.group() tok.lineno = self.lineno - tok.lexer = self - func,tok.type = self.lexindexfunc[i] + tok.lexpos = lexpos + + i = m.lastindex + func, tok.type = lexindexfunc[i] + if not func: - self.lexpos = lexpos - return tok - + # If no token type was set, it's an ignored token + if tok.type: + self.lexpos = m.end() + return tok + else: + lexpos = m.end() + break + + lexpos = m.end() + # If token is processed by a function, call it + + tok.lexer = self # Set additional attributes useful in token rules + self.lexmatch = m self.lexpos = lexpos + newtok = func(tok) - self.lineno = tok.lineno # Update line number - + # Every function must return a token, if nothing, we just move to next token - if not newtok: continue - + if not newtok: + lexpos = self.lexpos # This is here in case user has updated lexpos. + lexignore = self.lexignore # This is here in case there was a state change + break + # Verify type of the token. If not in the token map, raise an error - if not self.optimize: - if not self.lextokens.has_key(newtok.type): - raise LexError, ("%s:%d: Rule '%s' returned an unknown token type '%s'" % ( - func.func_code.co_filename, func.func_code.co_firstlineno, - func.__name__, newtok.type),lexdata[lexpos:]) + if not self.lexoptimize: + if newtok.type not in self.lextokens_all: + raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % ( + func.__code__.co_filename, func.__code__.co_firstlineno, + func.__name__, newtok.type), lexdata[lexpos:]) return newtok + else: + # No match, see if in literals + if lexdata[lexpos] in self.lexliterals: + tok = LexToken() + tok.value = lexdata[lexpos] + tok.lineno = self.lineno + tok.type = tok.value + tok.lexpos = lexpos + self.lexpos = lexpos + 1 + return tok - # No match. Call t_error() if defined. - if self.lexerrorf: - tok = LexToken() - tok.value = self.lexdata[lexpos:] - tok.lineno = self.lineno - tok.type = "error" - tok.lexer = self - oldpos = lexpos - newtok = self.lexerrorf(tok) - lexpos += getattr(tok,"_skipn",0) - if oldpos == lexpos: - # Error method didn't change text position at all. This is an error. + # No match. Call t_error() if defined. + if self.lexerrorf: + tok = LexToken() + tok.value = self.lexdata[lexpos:] + tok.lineno = self.lineno + tok.type = 'error' + tok.lexer = self + tok.lexpos = lexpos self.lexpos = lexpos - raise LexError, ("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:]) - if not newtok: continue - self.lexpos = lexpos - return newtok + newtok = self.lexerrorf(tok) + if lexpos == self.lexpos: + # Error method didn't change text position at all. This is an error. + raise LexError("Scanning error. 
Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:]) + lexpos = self.lexpos + if not newtok: + continue + return newtok + self.lexpos = lexpos + raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos], lexpos), lexdata[lexpos:]) + + if self.lexeoff: + tok = LexToken() + tok.type = 'eof' + tok.value = '' + tok.lineno = self.lineno + tok.lexpos = lexpos + tok.lexer = self self.lexpos = lexpos - raise LexError, ("No match found", lexdata[lexpos:]) + newtok = self.lexeoff(tok) + return newtok - # No more input data self.lexpos = lexpos + 1 + if self.lexdata is None: + raise RuntimeError('No input string given with input()') return None - + # Iterator interface + def __iter__(self): + return self + + def next(self): + t = self.token() + if t is None: + raise StopIteration + return t + + __next__ = next + # ----------------------------------------------------------------------------- -# validate_file() +# ==== Lex Builder === # -# This checks to see if there are duplicated t_rulename() functions or strings -# in the parser input file. This is done using a simple regular expression -# match on each line in the filename. +# The functions and classes below are used to collect lexing information +# and build a Lexer object from it. # ----------------------------------------------------------------------------- -def validate_file(filename): - import os.path - base,ext = os.path.splitext(filename) - if ext != '.py': return 1 # No idea what the file is. Return OK +# ----------------------------------------------------------------------------- +# _get_regex(func) +# +# Returns the regular expression assigned to a function either as a doc string +# or as a .regex attribute attached by the @TOKEN decorator. +# ----------------------------------------------------------------------------- +def _get_regex(func): + return getattr(func, 'regex', func.__doc__) +# ----------------------------------------------------------------------------- +# get_caller_module_dict() +# +# This function returns a dictionary containing all of the symbols defined within +# a caller further down the call stack. This is used to get the environment +# associated with the yacc() call if none was provided. +# ----------------------------------------------------------------------------- +def get_caller_module_dict(levels): + f = sys._getframe(levels) + ldict = f.f_globals.copy() + if f.f_globals != f.f_locals: + ldict.update(f.f_locals) + return ldict + +# ----------------------------------------------------------------------------- +# _funcs_to_names() +# +# Given a list of regular expression functions, this converts it to a list +# suitable for output to a table file +# ----------------------------------------------------------------------------- +def _funcs_to_names(funclist, namelist): + result = [] + for f, name in zip(funclist, namelist): + if f and f[0]: + result.append((name, f[1])) + else: + result.append(f) + return result + +# ----------------------------------------------------------------------------- +# _names_to_funcs() +# +# Given a list of regular expression function names, this converts it back to +# functions. 
+# ----------------------------------------------------------------------------- +def _names_to_funcs(namelist, fdict): + result = [] + for n in namelist: + if n and n[0]: + result.append((fdict[n[0]], n[1])) + else: + result.append(n) + return result + +# ----------------------------------------------------------------------------- +# _form_master_re() +# +# This function takes a list of all of the regex components and attempts to +# form the master regular expression. Given limitations in the Python re +# module, it may be necessary to break the master regex into separate expressions. +# ----------------------------------------------------------------------------- +def _form_master_re(relist, reflags, ldict, toknames): + if not relist: + return [] + regex = '|'.join(relist) try: - f = open(filename) - lines = f.readlines() - f.close() - except IOError: - return 1 # Oh well - - fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(') - sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=') - counthash = { } - linen = 1 - noerror = 1 - for l in lines: - m = fre.match(l) - if not m: - m = sre.match(l) - if m: - name = m.group(1) - prev = counthash.get(name) - if not prev: - counthash[name] = linen - else: - print "%s:%d: Rule %s redefined. Previously defined on line %d" % (filename,linen,name,prev) - noerror = 0 - linen += 1 - return noerror + lexre = re.compile(regex, reflags) + + # Build the index to function map for the matching engine + lexindexfunc = [None] * (max(lexre.groupindex.values()) + 1) + lexindexnames = lexindexfunc[:] + + for f, i in lexre.groupindex.items(): + handle = ldict.get(f, None) + if type(handle) in (types.FunctionType, types.MethodType): + lexindexfunc[i] = (handle, toknames[f]) + lexindexnames[i] = f + elif handle is not None: + lexindexnames[i] = f + if f.find('ignore_') > 0: + lexindexfunc[i] = (None, None) + else: + lexindexfunc[i] = (None, toknames[f]) + + return [(lexre, lexindexfunc)], [regex], [lexindexnames] + except Exception: + m = int(len(relist)/2) + if m == 0: + m = 1 + llist, lre, lnames = _form_master_re(relist[:m], reflags, ldict, toknames) + rlist, rre, rnames = _form_master_re(relist[m:], reflags, ldict, toknames) + return (llist+rlist), (lre+rre), (lnames+rnames) # ----------------------------------------------------------------------------- -# _read_lextab(module) +# def _statetoken(s,names) # -# Reads lexer table from a lextab file instead of using introspection. +# Given a declaration name s of the form "t_" and a dictionary whose keys are +# state names, this function returns a tuple (states,tokenname) where states +# is a tuple of state names and tokenname is the name of the token. For example, +# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM') # ----------------------------------------------------------------------------- +def _statetoken(s, names): + parts = s.split('_') + for i, part in enumerate(parts[1:], 1): + if part not in names and part != 'ANY': + break + + if i > 1: + states = tuple(parts[1:i]) + else: + states = ('INITIAL',) + + if 'ANY' in states: + states = tuple(names) + + tokenname = '_'.join(parts[i:]) + return (states, tokenname) + + +# ----------------------------------------------------------------------------- +# LexerReflect() +# +# This class represents information needed to build a lexer as extracted from a +# user's input file. 
+# ----------------------------------------------------------------------------- +class LexerReflect(object): + def __init__(self, ldict, log=None, reflags=0): + self.ldict = ldict + self.error_func = None + self.tokens = [] + self.reflags = reflags + self.stateinfo = {'INITIAL': 'inclusive'} + self.modules = set() + self.error = False + self.log = PlyLogger(sys.stderr) if log is None else log + + # Get all of the basic information + def get_all(self): + self.get_tokens() + self.get_literals() + self.get_states() + self.get_rules() + + # Validate all of the information + def validate_all(self): + self.validate_tokens() + self.validate_literals() + self.validate_rules() + return self.error + + # Get the tokens map + def get_tokens(self): + tokens = self.ldict.get('tokens', None) + if not tokens: + self.log.error('No token list is defined') + self.error = True + return + + if not isinstance(tokens, (list, tuple)): + self.log.error('tokens must be a list or tuple') + self.error = True + return + + if not tokens: + self.log.error('tokens is empty') + self.error = True + return + + self.tokens = tokens + + # Validate the tokens + def validate_tokens(self): + terminals = {} + for n in self.tokens: + if not _is_identifier.match(n): + self.log.error("Bad token name '%s'", n) + self.error = True + if n in terminals: + self.log.warning("Token '%s' multiply defined", n) + terminals[n] = 1 + + # Get the literals specifier + def get_literals(self): + self.literals = self.ldict.get('literals', '') + if not self.literals: + self.literals = '' + + # Validate literals + def validate_literals(self): + try: + for c in self.literals: + if not isinstance(c, StringTypes) or len(c) > 1: + self.log.error('Invalid literal %s. Must be a single character', repr(c)) + self.error = True + + except TypeError: + self.log.error('Invalid literals specification. literals must be a sequence of characters') + self.error = True + + def get_states(self): + self.states = self.ldict.get('states', None) + # Build statemap + if self.states: + if not isinstance(self.states, (tuple, list)): + self.log.error('states must be defined as a tuple or list') + self.error = True + else: + for s in self.states: + if not isinstance(s, tuple) or len(s) != 2: + self.log.error("Invalid state specifier %s. 
Must be a tuple (statename,'exclusive|inclusive')", repr(s)) + self.error = True + continue + name, statetype = s + if not isinstance(name, StringTypes): + self.log.error('State name %s must be a string', repr(name)) + self.error = True + continue + if not (statetype == 'inclusive' or statetype == 'exclusive'): + self.log.error("State type for state %s must be 'inclusive' or 'exclusive'", name) + self.error = True + continue + if name in self.stateinfo: + self.log.error("State '%s' already defined", name) + self.error = True + continue + self.stateinfo[name] = statetype + + # Get all of the symbols with a t_ prefix and sort them into various + # categories (functions, strings, error functions, and ignore characters) + + def get_rules(self): + tsymbols = [f for f in self.ldict if f[:2] == 't_'] + + # Now build up a list of functions and a list of strings + self.toknames = {} # Mapping of symbols to token names + self.funcsym = {} # Symbols defined as functions + self.strsym = {} # Symbols defined as strings + self.ignore = {} # Ignore strings by state + self.errorf = {} # Error functions by state + self.eoff = {} # EOF functions by state + + for s in self.stateinfo: + self.funcsym[s] = [] + self.strsym[s] = [] + + if len(tsymbols) == 0: + self.log.error('No rules of the form t_rulename are defined') + self.error = True + return + + for f in tsymbols: + t = self.ldict[f] + states, tokname = _statetoken(f, self.stateinfo) + self.toknames[f] = tokname + + if hasattr(t, '__call__'): + if tokname == 'error': + for s in states: + self.errorf[s] = t + elif tokname == 'eof': + for s in states: + self.eoff[s] = t + elif tokname == 'ignore': + line = t.__code__.co_firstlineno + file = t.__code__.co_filename + self.log.error("%s:%d: Rule '%s' must be defined as a string", file, line, t.__name__) + self.error = True + else: + for s in states: + self.funcsym[s].append((f, t)) + elif isinstance(t, StringTypes): + if tokname == 'ignore': + for s in states: + self.ignore[s] = t + if '\\' in t: + self.log.warning("%s contains a literal backslash '\\'", f) + + elif tokname == 'error': + self.log.error("Rule '%s' must be defined as a function", f) + self.error = True + else: + for s in states: + self.strsym[s].append((f, t)) + else: + self.log.error('%s not defined as a function or string', f) + self.error = True + + # Sort the functions by line number + for f in self.funcsym.values(): + f.sort(key=lambda x: x[1].__code__.co_firstlineno) + + # Sort the strings by regular expression length + for s in self.strsym.values(): + s.sort(key=lambda x: len(x[1]), reverse=True) + + # Validate all of the t_rules collected + def validate_rules(self): + for state in self.stateinfo: + # Validate all rules defined by functions + + for fname, f in self.funcsym[state]: + line = f.__code__.co_firstlineno + file = f.__code__.co_filename + module = inspect.getmodule(f) + self.modules.add(module) + + tokname = self.toknames[fname] + if isinstance(f, types.MethodType): + reqargs = 2 + else: + reqargs = 1 + nargs = f.__code__.co_argcount + if nargs > reqargs: + self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__) + self.error = True + continue + + if nargs < reqargs: + self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__) + self.error = True + continue + + if not _get_regex(f): + self.log.error("%s:%d: No regular expression defined for rule '%s'", file, line, f.__name__) + self.error = True + continue + + try: + c = re.compile('(?P<%s>%s)' % (fname, _get_regex(f)), 
self.reflags) + if c.match(''): + self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file, line, f.__name__) + self.error = True + except re.error as e: + self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file, line, f.__name__, e) + if '#' in _get_regex(f): + self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'", file, line, f.__name__) + self.error = True + + # Validate all rules defined by strings + for name, r in self.strsym[state]: + tokname = self.toknames[name] + if tokname == 'error': + self.log.error("Rule '%s' must be defined as a function", name) + self.error = True + continue + + if tokname not in self.tokens and tokname.find('ignore_') < 0: + self.log.error("Rule '%s' defined for an unspecified token %s", name, tokname) + self.error = True + continue + + try: + c = re.compile('(?P<%s>%s)' % (name, r), self.reflags) + if (c.match('')): + self.log.error("Regular expression for rule '%s' matches empty string", name) + self.error = True + except re.error as e: + self.log.error("Invalid regular expression for rule '%s'. %s", name, e) + if '#' in r: + self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'", name) + self.error = True + + if not self.funcsym[state] and not self.strsym[state]: + self.log.error("No rules defined for state '%s'", state) + self.error = True + + # Validate the error function + efunc = self.errorf.get(state, None) + if efunc: + f = efunc + line = f.__code__.co_firstlineno + file = f.__code__.co_filename + module = inspect.getmodule(f) + self.modules.add(module) + + if isinstance(f, types.MethodType): + reqargs = 2 + else: + reqargs = 1 + nargs = f.__code__.co_argcount + if nargs > reqargs: + self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__) + self.error = True + + if nargs < reqargs: + self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__) + self.error = True + + for module in self.modules: + self.validate_module(module) + + # ----------------------------------------------------------------------------- + # validate_module() + # + # This checks to see if there are duplicated t_rulename() functions or strings + # in the parser input file. This is done using a simple regular expression + # match on each line in the source code of the given module. + # ----------------------------------------------------------------------------- + + def validate_module(self, module): + try: + lines, linen = inspect.getsourcelines(module) + except IOError: + return + + fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(') + sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=') + + counthash = {} + linen += 1 + for line in lines: + m = fre.match(line) + if not m: + m = sre.match(line) + if m: + name = m.group(1) + prev = counthash.get(name) + if not prev: + counthash[name] = linen + else: + filename = inspect.getsourcefile(module) + self.log.error('%s:%d: Rule %s redefined. 
Previously defined on line %d', filename, linen, name, prev) + self.error = True + linen += 1 -def _read_lextab(lexer, fdict, module): - exec "import %s as lextab" % module - lexer.lexre = re.compile(lextab._lexre, re.VERBOSE) - lexer.lexindexfunc = lextab._lextab - for i in range(len(lextab._lextab)): - t = lexer.lexindexfunc[i] - if t: - if t[0]: - lexer.lexindexfunc[i] = (fdict[t[0]],t[1]) - lexer.lextokens = lextab._lextokens - lexer.lexignore = lextab._lexignore - if lextab._lexerrorf: - lexer.lexerrorf = fdict[lextab._lexerrorf] - # ----------------------------------------------------------------------------- # lex(module) # # Build all of the regular expression rules from definitions in the supplied module # ----------------------------------------------------------------------------- -def lex(module=None,debug=0,optimize=0,lextab="lextab"): +def lex(module=None, object=None, debug=False, optimize=False, lextab='lextab', + reflags=int(re.VERBOSE), nowarn=False, outputdir=None, debuglog=None, errorlog=None): + + if lextab is None: + lextab = 'lextab' + + global lexer + ldict = None - regex = "" - error = 0 - files = { } - lexer = Lexer() - lexer.debug = debug - lexer.optimize = optimize - global token,input - + stateinfo = {'INITIAL': 'inclusive'} + lexobj = Lexer() + lexobj.lexoptimize = optimize + global token, input + + if errorlog is None: + errorlog = PlyLogger(sys.stderr) + + if debug: + if debuglog is None: + debuglog = PlyLogger(sys.stderr) + + # Get the module dictionary used for the lexer + if object: + module = object + + # Get the module dictionary used for the parser if module: - # User supplied a module object. - if isinstance(module, types.ModuleType): - ldict = module.__dict__ - elif isinstance(module, types.InstanceType): - _items = [(k,getattr(module,k)) for k in dir(module)] - ldict = { } - for (i,v) in _items: - ldict[i] = v - else: - raise ValueError,"Expected a module or instance" - + _items = [(k, getattr(module, k)) for k in dir(module)] + ldict = dict(_items) + # If no __file__ attribute is available, try to obtain it from the __module__ instead + if '__file__' not in ldict: + ldict['__file__'] = sys.modules[ldict['__module__']].__file__ else: - # No module given. We might be able to get information from the caller. - try: - raise RuntimeError - except RuntimeError: - e,b,t = sys.exc_info() - f = t.tb_frame - f = f.f_back # Walk out to our calling function - ldict = f.f_globals # Grab its globals dictionary + ldict = get_caller_module_dict(2) + + # Determine if the module is package of a package or not. + # If so, fix the tabmodule setting so that tables load correctly + pkg = ldict.get('__package__') + if pkg and isinstance(lextab, str): + if '.' not in lextab: + lextab = pkg + '.' 
+ lextab + + # Collect parser information from the dictionary + linfo = LexerReflect(ldict, log=errorlog, reflags=reflags) + linfo.get_all() + if not optimize: + if linfo.validate_all(): + raise SyntaxError("Can't build lexer") if optimize and lextab: try: - _read_lextab(lexer,ldict, lextab) - if not lexer.lexignore: lexer.lexignore = "" - token = lexer.token - input = lexer.input - return lexer - + lexobj.readtab(lextab, ldict) + token = lexobj.token + input = lexobj.input + lexer = lexobj + return lexobj + except ImportError: pass - - # Get the tokens map - if (module and isinstance(module,types.InstanceType)): - tokens = getattr(module,"tokens",None) - else: - try: - tokens = ldict["tokens"] - except KeyError: - tokens = None - - if not tokens: - raise SyntaxError,"lex: module does not define 'tokens'" - if not (isinstance(tokens,types.ListType) or isinstance(tokens,types.TupleType)): - raise SyntaxError,"lex: tokens must be a list or tuple." + + # Dump some basic debugging information + if debug: + debuglog.info('lex: tokens = %r', linfo.tokens) + debuglog.info('lex: literals = %r', linfo.literals) + debuglog.info('lex: states = %r', linfo.stateinfo) # Build a dictionary of valid token names - lexer.lextokens = { } - if not optimize: + lexobj.lextokens = set() + for n in linfo.tokens: + lexobj.lextokens.add(n) - # Utility function for verifying tokens - def is_identifier(s): - for c in s: - if not (c.isalnum() or c == '_'): return 0 - return 1 - - for n in tokens: - if not is_identifier(n): - print "lex: Bad token name '%s'" % n - error = 1 - if lexer.lextokens.has_key(n): - print "lex: Warning. Token '%s' multiply defined." % n - lexer.lextokens[n] = None + # Get literals specification + if isinstance(linfo.literals, (list, tuple)): + lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals) else: - for n in tokens: lexer.lextokens[n] = None - - - if debug: - print "lex: tokens = '%s'" % lexer.lextokens.keys() - - # Get a list of symbols with the t_ prefix - tsymbols = [f for f in ldict.keys() if f[:2] == 't_'] - - # Now build up a list of functions and a list of strings - fsymbols = [ ] - ssymbols = [ ] - for f in tsymbols: - if callable(ldict[f]): - fsymbols.append(ldict[f]) - elif isinstance(ldict[f], types.StringType): - ssymbols.append((f,ldict[f])) - else: - print "lex: %s not defined as a function or string" % f - error = 1 - - # Sort the functions by line number - fsymbols.sort(lambda x,y: cmp(x.func_code.co_firstlineno,y.func_code.co_firstlineno)) - - # Sort the strings by regular expression length - ssymbols.sort(lambda x,y: (len(x[1]) < len(y[1])) - (len(x[1]) > len(y[1]))) - - # Check for non-empty symbols - if len(fsymbols) == 0 and len(ssymbols) == 0: - raise SyntaxError,"lex: no rules of the form t_rulename are defined." - - # Add all of the rules defined with actions first - for f in fsymbols: - - line = f.func_code.co_firstlineno - file = f.func_code.co_filename - files[file] = None - - ismethod = isinstance(f, types.MethodType) - - if not optimize: - nargs = f.func_code.co_argcount - if ismethod: - reqargs = 2 - else: - reqargs = 1 - if nargs > reqargs: - print "%s:%d: Rule '%s' has too many arguments." % (file,line,f.__name__) - error = 1 - continue + lexobj.lexliterals = linfo.literals - if nargs < reqargs: - print "%s:%d: Rule '%s' requires an argument." % (file,line,f.__name__) - error = 1 - continue + lexobj.lextokens_all = lexobj.lextokens | set(lexobj.lexliterals) - if f.__name__ == 't_ignore': - print "%s:%d: Rule '%s' must be defined as a string." 
% (file,line,f.__name__) - error = 1 - continue - - if f.__name__ == 't_error': - lexer.lexerrorf = f - continue + # Get the stateinfo dictionary + stateinfo = linfo.stateinfo - if f.__doc__: - if not optimize: - try: - c = re.compile(f.__doc__, re.VERBOSE) - except re.error,e: - print "%s:%d: Invalid regular expression for rule '%s'. %s" % (file,line,f.__name__,e) - error = 1 - continue + regexs = {} + # Build the master regular expressions + for state in stateinfo: + regex_list = [] - if debug: - print "lex: Adding rule %s -> '%s'" % (f.__name__,f.__doc__) + # Add rules defined by functions first + for fname, f in linfo.funcsym[state]: + regex_list.append('(?P<%s>%s)' % (fname, _get_regex(f))) + if debug: + debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", fname, _get_regex(f), state) - # Okay. The regular expression seemed okay. Let's append it to the master regular - # expression we're building - - if (regex): regex += "|" - regex += "(?P<%s>%s)" % (f.__name__,f.__doc__) - else: - print "%s:%d: No regular expression defined for rule '%s'" % (file,line,f.__name__) - - # Now add all of the simple rules - for name,r in ssymbols: - - if name == 't_ignore': - lexer.lexignore = r - continue - - if not optimize: - if name == 't_error': - raise SyntaxError,"lex: Rule 't_error' must be defined as a function" - error = 1 - continue - - if not lexer.lextokens.has_key(name[2:]): - print "lex: Rule '%s' defined for an unspecified token %s." % (name,name[2:]) - error = 1 - continue - try: - c = re.compile(r,re.VERBOSE) - except re.error,e: - print "lex: Invalid regular expression for rule '%s'. %s" % (name,e) - error = 1 - continue + # Now add all of the simple rules + for name, r in linfo.strsym[state]: + regex_list.append('(?P<%s>%s)' % (name, r)) if debug: - print "lex: Adding rule %s -> '%s'" % (name,r) - - if regex: regex += "|" - regex += "(?P<%s>%s)" % (name,r) + debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", name, r, state) - if not optimize: - for f in files.keys(): - if not validate_file(f): - error = 1 - try: + regexs[state] = regex_list + + # Build the master regular expressions + + if debug: + debuglog.info('lex: ==== MASTER REGEXS FOLLOW ====') + + for state in regexs: + lexre, re_text, re_names = _form_master_re(regexs[state], reflags, ldict, linfo.toknames) + lexobj.lexstatere[state] = lexre + lexobj.lexstateretext[state] = re_text + lexobj.lexstaterenames[state] = re_names if debug: - print "lex: regex = '%s'" % regex - lexer.lexre = re.compile(regex, re.VERBOSE) + for i, text in enumerate(re_text): + debuglog.info("lex: state '%s' : regex[%d] = '%s'", state, i, text) + + # For inclusive states, we need to add the regular expressions from the INITIAL state + for state, stype in stateinfo.items(): + if state != 'INITIAL' and stype == 'inclusive': + lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL']) + lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL']) + lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL']) + + lexobj.lexstateinfo = stateinfo + lexobj.lexre = lexobj.lexstatere['INITIAL'] + lexobj.lexretext = lexobj.lexstateretext['INITIAL'] + lexobj.lexreflags = reflags + + # Set up ignore variables + lexobj.lexstateignore = linfo.ignore + lexobj.lexignore = lexobj.lexstateignore.get('INITIAL', '') + + # Set up error functions + lexobj.lexstateerrorf = linfo.errorf + lexobj.lexerrorf = linfo.errorf.get('INITIAL', None) + if not lexobj.lexerrorf: + errorlog.warning('No t_error rule is defined') + + # Set up eof functions 
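+    # (EOF rules are new in PLY 3.x: a t_eof(t) function runs at end of
+    # input and may feed the lexer more data via t.lexer.input() and return
+    # a token, or return None to finish.)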
+ lexobj.lexstateeoff = linfo.eoff + lexobj.lexeoff = linfo.eoff.get('INITIAL', None) + + # Check state information for ignore and error rules + for s, stype in stateinfo.items(): + if stype == 'exclusive': + if s not in linfo.errorf: + errorlog.warning("No error rule is defined for exclusive state '%s'", s) + if s not in linfo.ignore and lexobj.lexignore: + errorlog.warning("No ignore rule is defined for exclusive state '%s'", s) + elif stype == 'inclusive': + if s not in linfo.errorf: + linfo.errorf[s] = linfo.errorf.get('INITIAL', None) + if s not in linfo.ignore: + linfo.ignore[s] = linfo.ignore.get('INITIAL', '') - # Build the index to function map for the matching engine - lexer.lexindexfunc = [ None ] * (max(lexer.lexre.groupindex.values())+1) - for f,i in lexer.lexre.groupindex.items(): - handle = ldict[f] - if type(handle) in (types.FunctionType, types.MethodType): - lexer.lexindexfunc[i] = (handle,handle.__name__[2:]) + # Create global versions of the token() and input() functions + token = lexobj.token + input = lexobj.input + lexer = lexobj + + # If in optimize mode, we write the lextab + if lextab and optimize: + if outputdir is None: + # If no output directory is set, the location of the output files + # is determined according to the following rules: + # - If lextab specifies a package, files go into that package directory + # - Otherwise, files go in the same directory as the specifying module + if isinstance(lextab, types.ModuleType): + srcfile = lextab.__file__ else: - # If rule was specified as a string, we build an anonymous - # callback function to carry out the action - lexer.lexindexfunc[i] = (None,f[2:]) - - # If a lextab was specified, we create a file containing the precomputed - # regular expression and index table - - if lextab and optimize: - lt = open(lextab+".py","w") - lt.write("# %s.py. This file automatically created by PLY. Don't edit.\n" % lextab) - lt.write("_lexre = %s\n" % repr(regex)) - lt.write("_lextab = [\n"); - for i in range(0,len(lexer.lexindexfunc)): - t = lexer.lexindexfunc[i] - if t: - if t[0]: - lt.write(" ('%s',%s),\n"% (t[0].__name__, repr(t[1]))) - else: - lt.write(" (None,%s),\n" % repr(t[1])) + if '.' not in lextab: + srcfile = ldict['__file__'] else: - lt.write(" None,\n") - - lt.write("]\n"); - lt.write("_lextokens = %s\n" % repr(lexer.lextokens)) - lt.write("_lexignore = %s\n" % repr(lexer.lexignore)) - if (lexer.lexerrorf): - lt.write("_lexerrorf = %s\n" % repr(lexer.lexerrorf.__name__)) - else: - lt.write("_lexerrorf = None\n") - lt.close() - - except re.error,e: - print "lex: Fatal error. Unable to compile regular expression rules. %s" % e - error = 1 - if error: - raise SyntaxError,"lex: Unable to build lexer." - if not lexer.lexerrorf: - print "lex: Warning. no t_error rule is defined." - - if not lexer.lexignore: lexer.lexignore = "" - - # Create global versions of the token() and input() functions - token = lexer.token - input = lexer.input - - return lexer + parts = lextab.split('.') + pkgname = '.'.join(parts[:-1]) + exec('import %s' % pkgname) + srcfile = getattr(sys.modules[pkgname], '__file__', '') + outputdir = os.path.dirname(srcfile) + try: + lexobj.writetab(lextab, outputdir) + if lextab in sys.modules: + del sys.modules[lextab] + except IOError as e: + errorlog.warning("Couldn't write lextab module %r. 
%s" % (lextab, e)) + + return lexobj # ----------------------------------------------------------------------------- -# run() +# runmain() # # This runs the lexer as a main program # ----------------------------------------------------------------------------- -def runmain(lexer=None,data=None): +def runmain(lexer=None, data=None): if not data: try: filename = sys.argv[1] @@ -677,7 +1059,7 @@ def runmain(lexer=None,data=None): data = f.read() f.close() except IndexError: - print "Reading from standard input (type EOF to end):" + sys.stdout.write('Reading from standard input (type EOF to end):\n') data = sys.stdin.read() if lexer: @@ -689,12 +1071,29 @@ def runmain(lexer=None,data=None): _token = lexer.token else: _token = token - - while 1: + + while True: tok = _token() - if not tok: break - print "(%s,'%s',%d)" % (tok.type, tok.value, tok.lineno) - - + if not tok: + break + sys.stdout.write('(%s,%r,%d,%d)\n' % (tok.type, tok.value, tok.lineno, tok.lexpos)) + +# ----------------------------------------------------------------------------- +# @TOKEN(regex) +# +# This decorator function can be used to set the regex expression on a function +# when its docstring might need to be set in an alternative way +# ----------------------------------------------------------------------------- + +def TOKEN(r): + def set_regex(f): + if hasattr(r, '__call__'): + f.regex = _get_regex(r) + else: + f.regex = r + return f + return set_regex +# Alternative spelling of the TOKEN decorator +Token = TOKEN diff --git a/src/ccg2xml/test_xml2ccg.py b/src/ccg2xml/test_xml2ccg.py new file mode 100644 index 0000000..e8e47af --- /dev/null +++ b/src/ccg2xml/test_xml2ccg.py @@ -0,0 +1,351 @@ +import functools +import sys +import unittest + +import xml.etree.ElementTree as ET +import xml.dom.minidom as minidom + +from io import StringIO +from pathlib import Path +from shutil import rmtree +from unittest.mock import patch + +from ccg2xml import main as ccg2xml +from xml2ccg import xml2ccg, XMLGrammar + + +TEST_DATA = Path(__file__).parent.parent.parent / 'test' / 'ccg2xml' +TEMP_DATA = Path(__file__).parent.parent.parent / 'test' / 'ccg2xml' / 'tmp' + + +def xml2ccg_argv(grammar): + """Prepares the command line arguments for xml2ccg. + + Args: + grammar: The grammar to be used. + """ + args = ['xml2ccg', str(TEST_DATA / grammar)] + return patch.object(sys, 'argv', args) + + +def ccg2xml_argv(grammar): + """Prepares the command line arguments for ccg2xml. + + Args: + grammar: The grammar to be used. 
+    """
+    args = ['ccg2xml', '-',
+            '--prefix', '{}-'.format(grammar),
+            '--quiet',
+            '--dir', str(TEMP_DATA / grammar)]
+    return patch.object(sys, 'argv', args)
+
+
+class SysIn:
+    """Wraps sys.stdin so that the input of ccg2xml can be supplied easily."""
+    def __enter__(self):
+        self.original_stdin = sys.stdin
+        sys.stdin = StringIO()
+        return sys.stdin
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        sys.stdin = self.original_stdin
+
+
+class SysOut:
+    """Wraps sys.stdout so that the output of xml2ccg can be captured easily."""
+    def __enter__(self):
+        self.original_stdout = sys.stdout
+        sys.stdout = StringIO()
+        return sys.stdout
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        sys.stdout = self.original_stdout
+
+
+def compare_type_elements(left, right):
+    """Compares two <type> elements by name and parents, ignoring parent order."""
+    if left.tag != 'type' != right.tag:
+        return False
+    if left.get('name') != right.get('name'):
+        return False
+    if sorted(left.get('parents').split()) != sorted(right.get('parents').split()):
+        return False
+    return True
+
+
+def compare_file_tags(left, right):
+    """Compares two elements carrying a file attribute (<... file="..." />).
+    Returns True if their tags match, ignoring the actual filenames."""
+    if 'file' not in left.attrib.keys() and 'file' not in right.attrib.keys():
+        return False
+    if left.tag != right.tag:
+        return False
+    return True
+
+
+def flatten_morph_macros_with_fs_feat_val(tree):
+    """ccg2xml does not handle the special case mentioned in tiny.ccg correctly.
+
+    A feature of the form
+
+        case<0>: acc0:p-case;
+
+    should be converted to
+
+        <macro name="@acc0">
+          <fs id="0" attr="case" val="p-case"/>
+        </macro>
+
+    but instead is converted to
+
+        <macro name="@acc0">
+          <fs id="0">
+            <feat attr="case" val="p-case"/>
+          </fs>
+        </macro>
+
+    So far, it seems that this is the only case where feat actually gets
+    assigned a val, thus this method fixes the situation by replacing
+    all fs inside a macro inside the tree with a single-element version if and
+    only if a val is present for the feat element.
+    """
+    for macro in tree.findall('macro'):
+        fs = macro.find('fs')
+        if fs is None:
+            continue
+        feat = fs.find('feat')
+        if feat is None:
+            continue
+        val = feat.get('val')
+        if val is None:
+            continue
+
+        fs.attrib['attr'] = feat.get('attr')
+        fs.attrib['val'] = val
+        fs.remove(feat)
+    return tree
+
+
+def tree_sort_key(elem):
+    return elem.tag + str(sorted('{}={}'.format(*a) for a in elem.attrib.items()))
+
+
+def recursive_tree_comparison(left, right):
+    if left.tag != right.tag:
+        return False
+    elif left.attrib != right.attrib:
+        return False
+    elif left.find('*') is not None and right.find('*') is None:
+        return False
+    elif left.find('*') is None:
+        return True
+    else:
+        return all(recursive_tree_comparison(l, r) for l, r in zip(iter(left), iter(right)))
+
+
+def compare_grammar_tree(left, right):
+    """Compares two XML tree structures.
+
+    If left is None, the comparison is skipped.
+
+    Args:
+        left: The left tree.
+        right: The right tree.
+
+    Returns:
+        A tuple (equal, (left_elem, right_elem)); if equal is False, the
+        element pair points at the first mismatch.
+    """
+
+    # Skip non-existent original files
+    if left is None:
+        return True, (None, None)
+
+    for l in left.findall('entry'):
+        if l.get('stem') is None and l.get('word') is not None:
+            l.attrib['stem'] = l.get('word')
+
+    left = flatten_morph_macros_with_fs_feat_val(left)
+    right = flatten_morph_macros_with_fs_feat_val(right)
+
+    # findall('*') selects children, so the root element is skipped on purpose,
+    # as its "name" attribute differs depending on how ccg2xml is called.
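+    # Sketch of the matching strategy below: each child of the original tree
+    # must find a counterpart among the generated tree's children, either via
+    # exact recursive comparison or via one of the relaxed comparators
+    # (compare_type_elements, compare_file_tags).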
+ list_left = sorted(left.findall('*'), key=tree_sort_key) + list_right = sorted(right.findall('*'), key=tree_sort_key) + + iter_right = iter(list_right) + r = None + for l in list_left: + equal = False + first_r = None + while not equal: + try: + r = next(iter_right) + if first_r is None: + first_r = r + except StopIteration: + return False, (l, first_r) + + if recursive_tree_comparison(l, r): + break + + for comp in [compare_type_elements, compare_file_tags]: + if comp(l, r): + equal = True + break + + return True, (None, None) + + +def compare_xmls(test_instance, grammar): + """Loads the TEST and TEMP grammars specified by grammar using XMLGrammar. + + Each individual part, as specified in XMLGrammar.valid_filenames, is + compared and asserted. This is collected as "soft asserts" and reported + completely at the end. + + Args: + test_instance: Should be a test object, usually "self" when using + unittest. + grammar: The grammar to be used. + + Raises: + AssertionError if not all parts of the grammars are equal. + """ + original = XMLGrammar(TEST_DATA / grammar) + generated = XMLGrammar(TEMP_DATA / grammar) + + test_map = {} + for fn in XMLGrammar.valid_filenames: + test, problem = compare_grammar_tree(getattr(original, fn), + getattr(generated, fn)) + test_map[fn] = { + 'original': problem[0], + 'generated': problem[1], + 'test': test, + } + + assert all(test_map[k]['test'] for k in test_map.keys()), generate_message(test_map) + + +def generate_message(test_map): + """Generates a slightly more informative assertion message in case a test fails. + + Args: + test_map: A test map of the form: + { + 'test_key': { + 'original': the expected value, + 'generated': the actual value, + 'test': the test result (True or False) + } + ... + } + Returns: + A string listing each test key with its original and generated values, + but only for those where the test value is False. + """ + tmpl = """{fn} test failed: + Generated: + {generated} + + Original: + {original} + """ + messages = [] + + def prettify(xml): + lines = [] + pretty = minidom.parseString(ET.tostring(xml)).toprettyxml() + for line in pretty.splitlines(): + if len(line.strip()) != 0: + lines.append(line) + return '\n '.join(lines) + + for k, v in test_map.items(): + if v['test']: + continue + generated = prettify(v['generated']) + original = prettify(v['original']) + messages.append(tmpl.format(fn=k, generated=generated, original=original)) + + return '\n'.join(['The two grammars do not match.'] + messages) + + +def test_grammar(grammar): + """Simplifies access to multiple similar test cases. + + A function annotated with test_grammar converts the grammar inside the + TEST_DATA directory with xml2ccg into a ccg file and passes it to ccg2xml. + ccg2xml in turn outputs the new set of xml files into TEMP_DATA / grammar. + + Then, both xml grammars are loaded and their xml files compared. + + Args: + grammar: The grammar to be used. + """ + def wrapper(func): + @functools.wraps(func) + def test_function(self, *args, **kwargs): + + with xml2ccg_argv(grammar), SysOut() as sout: + xml2ccg() + generated_ccg = sout.getvalue() + + with ccg2xml_argv(grammar), SysIn() as sin: + sin.write(generated_ccg) + sin.seek(0) + ccg2xml() + + compare_xmls(self, grammar) + + return test_function + return wrapper + + +class TestXML2CCG(unittest.TestCase): + """This class tests the xml2ccg functionality in conjunction with ccg2xml. 
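+
+    The round trip under test is, roughly:
+
+        original xml --xml2ccg--> generated ccg --ccg2xml--> regenerated xml
+
+    after which the regenerated xml files are compared part by part against
+    the originals (see compare_xmls above).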
+ + While the test methods only use the generated XML files present in + subdirectories of ${OPENCCG_HOME}/test/ccg2xml, the doc strings of each + test method mention the original source files. + + There are some *.ccg files inside the ${OPENCCG_HOME}/test/ccg2xml + directory. Those are specialized test cases and also available as + pre-compiled xml grammar directories. + """ + + @test_grammar('grammar_template') + def test_grammar_template(self): + """${OPENCCG_HOME}/src/ccg2xml/grammar_template.ccg""" + pass + + @test_grammar('tinytiny') + def test_tinytiny(self): + """${OPENCCG_HOME}/ccg-format-grammars/tinytiny/tinytiny.ccg""" + pass + + @test_grammar('tiny') + def test_tiny(self): + """${OPENCCG_HOME}/ccg-format-grammars/tiny/tiny.ccg""" + pass + + @test_grammar('inherit') + def test_inherit(self): + """${OPENCCG_HOME}/ccg-format-grammars/inherit/inherit.ccg""" + pass + + @test_grammar('arabic') + def test_arabic(self): + """${OPENCCG_HOME}/ccg-format-grammars/arabic/arabic.ccg + ${OPENCCG_HOME}/src/ccg2xml/arabic.ccg + (Both files are the same) + """ + pass + + @test_grammar('diaspace') + def test_diaspace(self): + """${OPENCCG_HOME}/ccg-format-grammars/diaspace/diaspace.ccg""" + pass + + @classmethod + def tearDownClass(cls): + """Removes the temp data after the tests.""" + rmtree(TEMP_DATA) diff --git a/src/ccg2xml/xml2ccg.py b/src/ccg2xml/xml2ccg.py new file mode 100644 index 0000000..ee122e6 --- /dev/null +++ b/src/ccg2xml/xml2ccg.py @@ -0,0 +1,868 @@ +#!/usr/bin/env python3 + +import argparse +import re +import sys +import xml.etree.ElementTree as ET +from collections import OrderedDict +from pathlib import Path +from datetime import datetime as dt + + +GRAMMAR_TEMPLATE = """\ +{grammar_name:#^57} +# +# This grammar was automatically generated from OpenCCG +# xml files using the xml2ccg tool. +# +# Conversion date: {conversion_date} +# +# For a tutorial on using this file, please refer to +# http://www.utcompling.com/wiki/openccg/visccg-tutorial +# + +###################### Features ######################### + +{features} + +######################## Words ########################## + +{words} + +######################## Rules ########################## + +{rules} + +################# Lexicon/Categories #################### + +{lexicon} + +####################### Testbed ######################### + +{testbed} +""" + + +class XMLGrammar: + """This class wraps a single directory containing all xml files for an + OpenCCG grammar into a flat API. + + Each file (grammar.xml, morph.xml, ...), as defined in + XMLGrammar.valid_filenames, can be accessed as an XML ElementTree as a + simple attribute: + + xml_grammar = XMLGrammar(path_to_grammar) + xml_grammar.morph # The parsed (prefix-)morph.xml of the grammar + xml_grammar.rules # The parsed (prefix-)rules.xml of the grammar + """ + valid_filenames = ['grammar', 'morph', 'lexicon', 'rules', 'types', 'testbed'] + + def __init__(self, path): + self.path = Path(path) + self.cache = {} + + def __getattribute__(self, attr): + """Allows to access the file contents of an XMLGrammar in a more + pythonic way. + + grammar = XMLGrammar('example-grammar') + grammar.morph + + will be the xml root element of example-grammar/prefix-morph.xml. + This works identically for grammar, morph, lexicon, rules, types, and testbed. 
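+
+        If no matching file exists or it is not well-formed XML, the
+        attribute is None (see the except clause below). Parsed roots are
+        cached per attribute name, so for example
+
+            XMLGrammar('no-such-dir').morph is None
+
+        holds without re-reading the directory on later accesses.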
+        """
+        if attr in XMLGrammar.valid_filenames:
+            try:
+                if attr not in self.cache:
+                    self.cache[attr] = ET.parse(next(self.path.glob('*' + attr + '.xml'))).getroot()
+            except (StopIteration, ET.ParseError):
+                self.cache[attr] = None
+            return self.cache[attr]
+        return object.__getattribute__(self, attr)
+
+    @property  # noqa: MC0001 ("too complex")
+    def ccg_features(self):
+        """Generates a feature { ... } string for the ccg file."""
+        if self.types is None:
+            return 'feature {\n}'
+
+        # Get all types / features from types.xml
+        features = OrderedDict()
+        for type_ in self.types.iter('type'):
+            feature = Feature(type_)
+            if feature.name in features:
+                raise KeyError('Feature {} specified twice in types.'.format(feature.name))
+            features[feature.name] = feature
+
+        special_macros = []
+        # Get all feature structure ids from morph.xml
+        for macro in self.morph.iter('macro'):
+            feature_val = None
+            # Simple numeric id
+            if macro.find('fs') is not None:
+                fs = macro.find('fs')
+                feature_id = fs.get('id')
+                feature = fs.get('attr')
+                if fs.find('feat') is not None:
+                    feature = fs.find('feat').get('attr')
+                    feature_val = fs.find('feat').get('val')
+                if fs.get('val') is not None:
+                    feature_val = fs.get('val')
+                special_macro = SpecialMacro(macro.get('name'),
+                                             feature_id,
+                                             feature,
+                                             feature_val)
+                special_macros.append(special_macro)
+                continue
+            # <lf> declaration
+            elif macro.find('lf') is not None:
+                feature = macro.get('name')[1:]
+                lf = macro.find('lf')
+                satop = lf.find('satop')
+                feature_id = maybe_quote(satop.get('nomvar'))
+                if satop.find('diamond') is not None:
+                    diamond = satop.find('diamond')
+                    mode = diamond.get('mode')
+                else:
+                    mode = satop.find('prop').get('name')
+                if feature_id != mode:
+                    feature_id = '{}:{}'.format(feature_id, mode)
+            else:
+                # TODO(shoeffner): Is there actually a different case? ccg2xml
+                # cannot create more than this.
+                continue
+            try:
+                features[feature].feature_struct_ids.append(feature_id)
+            except KeyError:  # New feature from a macro definition
+                features[feature] = Feature(xml_type=macro, val=feature_val)
+                features[feature].name = feature
+                features[feature].feature_struct_ids.append(feature_id)
+
+        # Traverse again to add all children to their parents (second pass to
+        # ensure all parents exist)
+        for feature in features.values():
+            parents = []
+            if feature.xml is not None:
+                parents = feature.xml.get('parents', '').split()
+                prop = feature.xml.find('./lf/satop/diamond/prop')
+                if prop is not None:
+                    name = prop.get('name')
+                    if name in features:
+                        sf = SimpleFeature(name)
+                        feature.children.append(sf)
+                        feature.toplevel = True
+            if parents:
+                if len(parents) > 1:
+                    feature.additional_parents += parents[1:]
+                features[parents[0]].children.append(feature)
+
+        # Add special macros and keep the inheritance structure intact by
+        # finding their parents
+        for feature in special_macros:
+            if feature.attr in features:
+                # Append the parent name; += with a string would splice in
+                # its individual characters.
+                feature.additional_parents.append(feature.attr)
+            features[feature.attr + feature.name] = feature
+
+        # Determine which features are distributive
+        dist_features = self.lexicon.find('distributive-features')
+        if dist_features is not None:
+            for feature_name in dist_features.get('attrs', '').split():
+                features[feature_name].distributive = True
+
+        # Determine licensing features
+        lic_features = self.lexicon.find('licensing-features')
+        if lic_features is not None:
+            for feat in lic_features.iter('feat'):
+                feature_name = feat.get('val', feat.get('attr'))
+                features[feature_name].licensing_features.update(feat.attrib)
+
+        # Print all toplevel features (all others are thus printed implicitly)
+        feature_string = '\n\n  '.join(str(f) for f in features.values() if f.toplevel)
+
+        # Relation sorting can also be specified
+        rel_sort_str = ''
+        rel_sort = self.lexicon.find('relation-sorting')
+        if rel_sort is not None:
+            rel_sort_str = rel_sort.get('order')
+            rel_sort_str = '\n\nrelation-sorting: {};'.format(rel_sort_str)
+
+        feature_section = 'feature {{\n  {}\n}}'.format(feature_string) + rel_sort_str
+        return feature_section
+
+    @property
+    def ccg_words(self):
+        """Generates a flat list of word entries.
+
+        Most hand-written ccg files use macros to make e.g. 3rd person
+        singular forms simpler. We do not have access to these macros and we
+        don't try anything fancy to infer them, so this section will be blown
+        up a lot compared to a manually crafted words section.
+
+        Note that grouping is performed where possible.
+        """
+        if self.morph is None:
+            return ''
+
+        # Find all words
+        words = map(Word, self.morph.iter('entry'))
+
+        # Note that at this point '\n'.join(map(str, words)) should
+        # already be a valid ccg file. However, it is better to compress this
+        # format a little bit, so the words are merged into groups. This
+        # is still less elegant than macros, but inferring plural-s or
+        # 3rd-person singular-s macros is rather difficult, so we leave it
+        # at that for now.
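+
+        # Illustrative example (hypothetical entries): two morph entries
+        # sharing the header "word walk:V" would be merged by the grouping
+        # below into roughly
+        #
+        #     word walk:V {
+        #       walk;
+        #       walked;
+        #     }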
+
+        # Merge words with the same header into groups
+        word_groups = {}
+        for word in words:
+            header = word.header()
+            if header not in word_groups:
+                word_groups[header] = []
+            word_groups[header].append(word)
+
+        # Format groups or single words
+        word_strings = []
+        for header, group in word_groups.items():
+            if len(group) == 1:
+                word_strings.append(str(group[0]))
+            else:
+                bodies = '\n  '.join(w.body(True) for w in group)
+                word_string = '{} {{\n  {}\n}}'
+                word_strings.append(word_string.format(header, bodies))
+
+        words_section = '\n'.join(word_strings)
+        return words_section
+
+    def _create_simple_rules(self, tag, rulename):
+        """Parses simple rules for a tag into the rules rulename and
+        xrulename.
+
+        If the attribute harmonic is true, the normal rule is modified
+        according to the dir attribute (forward = +, backward = -);
+        if harmonic is false, the cross-rulename (xrulename) is modified.
+
+        The results are e.g.:
+
+            app +;
+            xapp +-;
+
+        for <application> tags with the rulename app, where the
+        harmonic = false rules were given for both directions, while the
+        harmonic = true rules were only given for the forward direction.
+
+        Returns:
+            A list of rule strings, e.g. ['app +;', 'xapp +-;']
+        """
+        directions = {
+            'forward': '+',
+            'backward': '-'
+        }
+
+        rule = ''
+        xrule = ''
+        for item in self.rules.iter(tag):
+            if item.get('harmonic', 'true') == 'true':
+                rule += directions[item.get('dir')]
+            else:
+                xrule += directions[item.get('dir')]
+        if rule:
+            rule = '{} {};'.format(rulename, rule)
+        if xrule:
+            xrule = 'x{} {};'.format(rulename, xrule)
+        return [rule, xrule]
+
+    def _create_typeraising_rules(self):
+        """Parses all typeraising rules.
+
+        This works similarly to _create_simple_rules, but typeraising rules
+        have a useDollar attribute instead of harmonic, as well as
+        additional subelements.
+        """
+        directions = {
+            'forward': '+',
+            'backward': '-'
+        }
+
+        rules = []
+        for item in self.rules.iter('typeraising'):
+            rule = 'typeraise {}'.format(directions[item.get('dir')])
+            if item.get('useDollar', 'false') == 'true':
+                rule += ' $'
+
+            # complex rule: argument => result
+            if item.find('arg') is not None:
+                argument = CategoryParser().parse_cat(item.find('arg').find('*'))
+                rule += ': {}'.format(argument)
+
+            if item.find('result') is not None:
+                result = CategoryParser().parse_cat(item.find('result').find('*'))
+                rule += '=> {}'.format(result)
+
+            rules.append(rule + ';')
+        return rules
+
+    def _create_typechanging_rules(self):
+        """Parses all typechanging rules.
+
+        This works similarly to _create_typeraising_rules, but typechanging
+        rules have a slightly different format.
+        """
+        rules = []
+        for item in self.rules.iter('typechanging'):
+            rule = 'typechange'
+
+            # complex rule: argument => result
+            if item.find('arg') is not None and item.find('result') is not None:
+                argument = CategoryParser().parse_cat(item.find('arg').find('*'))
+                result = CategoryParser().parse_cat(item.find('result').find('*'))
+                rule += ': {} => {}'.format(argument, result)
+
+            rules.append(rule + ';')
+        return rules
+
+    @property
+    def ccg_rules(self):
+        """Create the rule section for a ccg file.
+
+        For simplicity, this algorithm just resets all defaults and marks
+        each rule explicitly.
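+
+        As an illustration, a rules.xml that only allows harmonic application
+        in both directions would come out as
+
+            rule {
+              no;
+              app +-;
+            }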
+        """
+        if self.rules is None:
+            return ''
+
+        rules = ['no;']  # Remove all defaults
+        rules += self._create_simple_rules('application', 'app')
+        rules += self._create_simple_rules('composition', 'comp')
+        rules += self._create_simple_rules('substitution', 'sub')
+        rules += self._create_typeraising_rules()
+        rules += self._create_typechanging_rules()
+
+        rule_section = 'rule {{\n  {}\n}}'.format('\n  '.join(r for r in rules if r))
+        # @shoeffner: Not sure if this is needed, but just in case put forward (+) first
+        rule_section = rule_section.replace('-+', '+-')
+        return rule_section
+
+    @property
+    def ccg_lexicon(self):
+        """The lexicon is a printout of all families.
+        Families are parsed inside the Family class.
+        """
+        if self.lexicon is None:
+            return ''
+
+        families = map(Family, self.lexicon.iter('family'))
+
+        lexicon_section = '\n\n'.join(map(str, families))
+        return lexicon_section
+
+    @property
+    def ccg_testbed(self):
+        """Creates the ccg testbed.
+
+        The testbed is a section with a list of sentences and parse numbers:
+
+            testbed {
+              one sentence: 1;
+              another sentence: 2;
+              wrong sentence: 0;
+              ! known failure: 0;
+            }
+        """
+        if self.testbed is None:
+            return 'testbed {\n}'
+
+        fmt = '  {known}{string}: {numOfParses};'
+
+        lines = []
+        for item in self.testbed.iter('item'):
+            known = '! ' if item.get('known') == 'true' else ''
+            line = fmt.format(known=known,
+                              string=item.get('string'),
+                              # According to tiny.ccg, omitting the number
+                              # may be equivalent to 1
+                              numOfParses=item.get('numOfParses', '1'))
+            lines.append(line)
+        testbed_section = 'testbed {\n' + '\n'.join(lines) + '\n}'
+        return testbed_section
+
+    @property
+    def ccg(self):
+        """Converts this XMLGrammar to a ccg file string.
+
+        Returns:
+            A string which can be stored as a *.ccg file and converted back
+            via ccg2xml. The GRAMMAR_TEMPLATE is populated with the
+            individual sections.
+        """
+        sections = {
+            'features': self.ccg_features,
+            'words': self.ccg_words,
+            'rules': self.ccg_rules,
+            'lexicon': self.ccg_lexicon,
+            'testbed': self.ccg_testbed
+        }
+
+        # Fill the GRAMMAR_TEMPLATE.
+        # Make sure the grammar name is wrapped in spaces and ends in ccg to follow
+        # the example grammars included in OpenCCG.
+        return GRAMMAR_TEMPLATE.format(grammar_name=' {}.ccg '.format(self.path.stem),
+                                       conversion_date=dt.now().isoformat(),
+                                       **sections)
+
+
+class Feature:
+    def __init__(self, xml_type=None, val=None):
+        if xml_type is None:
+            self.xml = None
+            self.name = None
+            self.toplevel = False
+        else:
+            self.xml = xml_type
+            # found in types.xml
+            self.name = xml_type.get('name').replace('@', '')
+            # toplevel if it has no parents
+            self.toplevel = xml_type.get('parents') is None
+
+        self.val = val
+        self.licensing_features = {}
+        self.children = []
+
+        # Denoted with ! in ccg, found in lexicon in distributive-features
+        self.distributive = False
+
+        # Explicit parents in case of multiple inheritance
+        self.additional_parents = []
+
+        # Feature structure IDs
+        self.feature_struct_ids = []
+
+    def _gather_feature_struct_ids(self):
+        features = self.feature_struct_ids.copy()
+        for child in self.children:
+            features += child._gather_feature_struct_ids()
+        return list(set(features))
+
+    def __str__(self, depth=0):
+        """Features are represented in various different ways depending on
+        the feature hierarchy, the available information, and their function
+        as either a type or a macro. The __str__ function tries to capture
+        all these nuances with a basic format and some shortcuts for special
+        features (i.e. implicit macro features).
+        """
+        fmt = '{dist}{name}{syntactic}{licensing}{parents}{colon}{children}{semicolon}'
+
+        dist = ''
+        syntactic = ''
+        parents = ''
+        colon = ''
+        children = ' '.join(child.__str__(depth+1) for child in self.children)
+        semicolon = ''
+        licensing = ''
+
+        if self.val is not None:  # Special feature
+            if depth == 0:
+                return '{}<{}>: {}:{};'.format(maybe_quote(self.name),
+                                               ','.join(self.feature_struct_ids),
+                                               maybe_quote(self.xml.get('attr')),
+                                               maybe_quote(self.val))
+
+        if self.toplevel:
+            dist = '!' if self.distributive else ''
+
+            # gather feature structure ids from all children
+            features = self._gather_feature_struct_ids()
+            if features:
+                syntactic = '<{}>'.format(','.join(features))
+            if self.children:
+                colon = ': '
+            semicolon = ';'
+        else:
+            if self.additional_parents:
+                parents = '[{}]'.format(' '.join(self.additional_parents))
+            if children:
+                if depth > 0:
+                    children = ' {{\n  {spaces}{children}\n{spaces}}}\n{spaces}'\
+                        .format(spaces='  ' * depth, children=children)
+                else:
+                    children = '{{{}}}'.format(children)
+
+        if self.licensing_features:
+            licensing = ', '.join('{}={}'.format(k, v)
+                                  for k, v in self.licensing_features.items()
+                                  if k not in ['attr', 'val'])
+            licensing = '(' + licensing + ')'
+
+        return fmt.format(dist=dist,
+                          name=maybe_quote(self.name),
+                          syntactic=syntactic,
+                          parents=parents,
+                          colon=colon,
+                          children=children,
+                          semicolon=semicolon,
+                          licensing=licensing)
+
+
+class SimpleFeature(Feature):
+    """A SimpleFeature is an implicit feature which is only a single name entry.
+
+    It is likely that this occurs only in slightly incorrect hand-written xml
+    grammars; however, handling it this way seems to fix most issues inside
+    the (re-)generated xml grammar.
+    """
+    def __init__(self, name):
+        super().__init__()
+        self.name = name
+
+    def __str__(self, depth=0):
+        if depth != 0 and self.additional_parents:
+            return '{}[{}]'.format(maybe_quote(self.name),
+                                   ' '.join(map(maybe_quote, self.additional_parents)))
+        return maybe_quote(self.name)
+
+
+class SpecialMacro(Feature):
+    """Allows creating explicit feature instructions.
+
+        <macro name="@acc0">
+          <fs id="0" attr="case" val="p-case"/>
+        </macro>
+
+    is converted to
+
+        case<0>: acc0:p-case;
+    """
+
+    def __init__(self, name, feature_id, feature, val):
+        super().__init__()
+        self.toplevel = True
+        self.name = name.replace('@', '')
+        self.id = feature_id
+        self.attr = feature
+        self.val = val
+
+    def __str__(self, depth=0):
+        if depth == 0:
+            fs = ['{}<{}>: {}:{};'.format(maybe_quote(self.attr),
+                                          maybe_quote(self.id),
+                                          maybe_quote(self.name),
+                                          maybe_quote(self.val))]
+            # A toplevel special feature considers its children as siblings
+            for f in self.children:
+                fs.append('{}<{}>: {}:{};'.format(maybe_quote(f.attr),
+                                                  maybe_quote(f.id),
+                                                  maybe_quote(f.name),
+                                                  maybe_quote(f.val)))
+            return '\n  '.join(fs)
+
+        else:
+            return '{}:{}'.format(maybe_quote(self.name),
+                                  maybe_quote(self.val))
+
+
+def maybe_quote(word):
+    """In ccg, atoms must match /[a-zA-Z0-9-+%_*]+/ or be a quoted string. If
+    an atom from an xml file does not match that expression, it will be quoted.
+
+    If an expression contains both ' and ", this function raises a ValueError
+    -- the easiest way to solve this is to change the grammar xml files.
+    (Note that no escaping is taken into account.)
+    If only one type of quote is present, the other type is used to quote the
+    string, which is again allowed in ccg.
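+
+    Illustrative examples:
+
+        maybe_quote('n-bar')  -> n-bar    (matches the atom pattern)
+        maybe_quote('a.b')    -> 'a.b'    (contains a dot, single-quoted)
+        maybe_quote("it's")   -> "it's"   (contains ', double-quoted)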
+    """
+    if re.match('.*[^a-zA-Z0-9-+%_*]+.*', word):
+        if "'" in word and '"' in word:
+            raise ValueError('Cannot handle single and double quotes in a single word: {}'.format(word))
+        elif "'" in word:
+            return '"{}"'.format(word)
+        else:
+            return "'{}'".format(word)
+    return word
+
+
+class Word:
+    def __init__(self, xml_entry):
+        self.xml = xml_entry
+        self.form = xml_entry.get('word')  # inflected form
+        self.stem = xml_entry.get('stem', self.form)
+        self.form = maybe_quote(self.form)
+        self.stem = maybe_quote(self.stem)
+        self.family = xml_entry.get('pos', '')
+        self.attributes = xml_entry.get('class', '').split()
+        for key in ['pred', 'excluded', 'coart']:
+            val = xml_entry.get(key)
+            if val is not None:
+                self.attributes.append('{}={}'.format(key, val))
+        self.features = [m[1:] for m in xml_entry.get('macros', '').split()]
+
+    def header(self):
+        """Returns the header part of a word, that is, its stem, family,
+        and possible attributes, such as classes. The header is always
+        prefixed by 'word'.
+        """
+        fmt = 'word {stem}{family_colon}{family}{attr}'
+
+        family_colon = ':' if self.family else ''
+        attr = ''
+        if self.attributes:
+            attr = '({})'.format(','.join(self.attributes))
+
+        return fmt.format(stem=self.stem,
+                          family_colon=family_colon,
+                          family=maybe_quote(self.family),
+                          attr=attr)
+
+    def body(self, explicit=False):
+        """Returns the body part of a word, that is, its form and its
+        features, which are macro names.
+
+        Args:
+            explicit: If explicit is True, the form will be assumed to be
+                      different from its stem, even if they are the same.
+                      This is important for word groups, where the stem also
+                      equals one of the inflected forms.
+        """
+        fmt = '{form}{features};'
+
+        form = ''
+        features = ''
+        if self.form != self.stem or explicit:
+            form = self.form
+            if self.features:
+                form += ': '
+        if self.features:
+            features = ' '.join(map(maybe_quote, self.features))
+
+        return fmt.format(form=form,
+                          features=features)
+
+    def __str__(self):
+        fmt = '{header}{colon}{body}'
+
+        colon = ''
+        if self.form != self.stem:
+            colon = ' '
+            fmt = '{header}{colon}{{\n  {body}\n}}'
+        elif self.features:
+            colon = ': '
+
+        return fmt.format(header=self.header(),
+                          colon=colon,
+                          body=self.body())
+
+
+class FamilyEntry:
+    def __init__(self, xml_entry):
+        self.name = xml_entry.get('name')
+        self.category = CategoryParser().parse_cat(xml_entry.find('*'))
+
+    def __str__(self):
+        fmt = 'entry{name}: {catstring};'
+
+        name = ' ' + maybe_quote(self.name) if not self.name.startswith('Entry-') else ''
+        catstring = self.category
+
+        return fmt.format(name=name, catstring=catstring).replace('[*DEFAULT*]', '*')
+
+
+class CategoryParser:
+    """The CategoryParser recursively parses family rules.
+
+    Usually, just the parse_cat function should be called."""
+    def parse_cat(self, cat, depth=0):
+        """This function dispatches the parsing to the correct function
+        depending on the category's xml tag name. It prepends lf values
+        with a colon.
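+
+        As a rough illustration: a complexcat for s \ np is rendered
+        without parentheses at the top level, but as (s \ np) when nested
+        one level deeper, and an lf child is appended as ': ...' after
+        the category.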
+ """ + if cat.tag == 'complexcat': + return self.parse_complexcat(cat, depth=depth) + elif cat.tag == 'atomcat': + return self.parse_atomcat(cat) + elif cat.tag == 'setarg': + return self.parse_setarg(cat) + elif cat.tag == 'slash': + return self.parse_slash(cat) + elif cat.tag == 'dollar': + return self.parse_dollar(cat) + elif cat.tag == 'lf': + return ': ' + self.parse_lf(cat) + else: + raise ValueError('Unknown tag {}'.format(str(cat))) + + def parse_complexcat(self, complexcat, depth=0): + if depth > 0: + fmt = '({})' + else: + fmt = '{}' + return fmt.format(''.join(map(lambda x: self.parse_cat(x, depth+1), complexcat))) + + def parse_atomcat(self, atomcat): + fmt = '{type}{fs}' + type_ = maybe_quote(atomcat.get('type', '')) + fs = [] + lf = [] + for elem in atomcat: + if elem.tag == 'fs': + fs.append(self.parse_fs(elem)) + elif elem.tag == 'lf': + fmt = '{type}{fs}: {lf}' + lf.append(self.parse_lf(elem)) + if lf: + return fmt.format(type=type_, fs=''.join(fs), lf=''.join(lf)) + return fmt.format(type=type_, fs=''.join(fs)) + + def parse_setarg(self, setarg): + results = ''.join(map(self.parse_cat, setarg)) + return '{{{}}}'.format(results) + + def parse_slash(self, slash): + mode = slash.get('mode', '') + dir = slash.get('dir', '') + if dir == '' and mode == '': + return '' + # If only a mode is given, assume default dir for that mode + if dir == '': + if mode == '.': + dir = '|' + elif mode == '>': + dir = '/' + elif mode == '<': + dir = '\\' + if (dir == '/' and mode == '>') \ + or (dir == '\\' and mode == '<') \ + or (dir == '|' and mode == '.'): + mode = '' + return ' {}{} '.format(dir, mode) + + def parse_dollar(self, dollar): + return '${}'.format(dollar.get('name', '')) + + def parse_diamond(self, diamond): + if len(diamond) == 1: + fmt = '<{mode}>{name}' + mode = maybe_quote(diamond.get('mode', '')) + name = diamond.find('*').get('name', '') + return fmt.format(mode=mode, name=name) + fmt = '<{mode}>({props})' + mode = diamond.get('mode', '') + dstrings = [] + for elem in diamond: + if elem.tag == 'diamond': + dstrings.append(self.parse_diamond(elem)) + elif elem.tag in ['nomvar', 'prop']: + dstrings.append(elem.get('name', '')) + else: + raise ValueError("Unknown tag inside 'diamond': {}".format(elem.tag)) + dstrings.append('^') + if dstrings[-1] == '^': + del dstrings[-1] + return fmt.format(mode=mode, props=' '.join(dstrings)) + + def parse_lf(self, lf): + satop = lf.find('satop') + cat = satop.get('nomvar', '') + + catprops = [] + for elem in satop: + if elem.tag == 'prop': + catprops.append(elem.get('name', '')) + elif elem.tag == 'diamond': + diamond = self.parse_diamond(elem) + if diamond: + catprops.append(diamond) + if len(catprops) > 0: + cat += '({})'.format(' '.join(catprops)) + + return cat + + def parse_fs(self, fs): + result = '' + + id_ = fs.get('id') + if id_ is not None: + result += '<{id}>'.format(id=id_) + else: + inherited = fs.get('inheritsFrom') + if inherited is not None: + result += '<~{}>'.format(inherited) + + features = [] + for feat in fs.iter('feat'): + if feat.get('attr') == 'index': + nametag = feat.find('lf/nomvar') + features.append(nametag.get('name')) + continue + nametag = feat.find('featvar') + name = feat.get('attr') + if nametag is not None: + nametag = nametag.find('nomvar') or nametag + nomvar_name = nametag.get('name') + if nomvar_name.lower() != name: + name = nomvar_name + val = feat.get('val') + if val is not None: + name += '={}'.format(maybe_quote(val) if val != '[*DEFAULT*]' else val) + if name is not None: + 
features.append(name)
+
+        if features:
+            result += '[{}]'.format(' '.join(features))
+
+        return result
+
+
+class Family:
+    def __init__(self, xml_family):
+        self.xml = xml_family
+        self.name = xml_family.get('name')
+        self.pos = xml_family.get('pos')
+
+        self.entries = [FamilyEntry(entry) for entry in xml_family.iter('entry')]
+        self.members = [maybe_quote(member.get('stem')) for member in xml_family.iter('member')]
+
+        # store all additional attributes which are not otherwise treated specially
+        captured = ['pos', 'name', 'closed']
+        self.attributes = {k: v for k, v in xml_family.attrib.items() if k not in captured}
+
+    def __str__(self):
+        fmt = ('family {name}{attr} {{\n'
+               '  {entries}\n'
+               '  {members}\n'
+               '}}')
+
+        attributes = [] if self.name == self.pos else [maybe_quote(self.pos)]
+        attributes += ['{}="{}"'.format(maybe_quote(k), maybe_quote(v)) for k, v in self.attributes.items()]
+        attr = ('(' + ', '.join(attributes) + ')') if attributes else ''
+
+        entries = '\n  '.join(map(str, self.entries))
+        members = ''
+        if len(self.members) > 0:
+            members = 'member: ' + (', '.join(self.members)) + ';'
+
+        return fmt.format(name=maybe_quote(self.name),
+                          attr=attr,
+                          entries=entries,
+                          members=members)
+
+
+def xml2ccg():
+    """Entry point of the program."""
+    arguments = parse_args()
+    print(arguments.folder.ccg, file=arguments.output)
+
+
+def parse_args():
+    """Parses the program arguments.
+
+    Returns:
+        The parsed arguments.
+    """
+    parser = argparse.ArgumentParser()
+    parser.add_argument('folder', nargs='?', type=XMLGrammar, default='.',
+                        help="The path to the directory containing the grammar's xml files.")
+    parser.add_argument('-o', '--output', nargs='?', type=argparse.FileType('w'),
+                        default=sys.stdout, help="The output file. Defaults to STDOUT.")
+    return parser.parse_args()
+
+
+if __name__ == '__main__':
+    xml2ccg()
diff --git a/src/ccg2xml/yacc.py b/src/ccg2xml/yacc.py
index b928c07..35bf243 100644
--- a/src/ccg2xml/yacc.py
+++ b/src/ccg2xml/yacc.py
@@ -1,32 +1,41 @@
-#-----------------------------------------------------------------------------
+# -----------------------------------------------------------------------------
 # ply: yacc.py
 #
-# Author(s): David M. Beazley (dave@dabeaz.com)
+# Copyright (C) 2001-2018
+# David M. Beazley (Dabeaz LLC)
+# All rights reserved.
 #
-# Copyright (C) 2001-2005, David M. Beazley
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
 #
-# $Header: /cvsroot/openccg/openccg/src/ccg2xml/yacc.py,v 1.1 2006/09/30 08:11:29 benwing Exp $
-#
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-#
-# See the file COPYING for a complete copy of the LGPL.
+# * Redistributions of source code must retain the above copyright notice,
+#   this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the David Beazley or Dabeaz LLC may be used to +# endorse or promote products derived from this software without +# specific prior written permission. # +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# ----------------------------------------------------------------------------- # # This implements an LR parser that is constructed from grammar rules defined -# as Python functions. Roughly speaking, this module is a cross between -# John Aycock's Spark system and the GNU bison utility. +# as Python functions. The grammar is specified by supplying the BNF inside +# Python documentation strings. The inspiration for this technique was borrowed +# from John Aycock's Spark parsing system. PLY might be viewed as cross between +# Spark and the GNU bison utility. # # The current implementation is only somewhat object-oriented. The # LR parser itself is defined in terms of an object (which allows multiple @@ -36,8 +45,10 @@ # time using threads (in which case they should have their head examined). # # This implementation supports both SLR and LALR(1) parsing. LALR(1) -# support was implemented by Elias Ioup (ezioup@alumni.uchicago.edu) -# and hacked abit by Dave to run faster. +# support was originally implemented by Elias Ioup (ezioup@alumni.uchicago.edu), +# using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles, +# Techniques, and Tools" (The Dragon Book). LALR(1) has since been replaced +# by the more efficient DeRemer and Pennello algorithm. # # :::::::: WARNING ::::::: # @@ -48,7 +59,15 @@ # own risk! # ---------------------------------------------------------------------------- -__version__ = "1.6" +import re +import types +import sys +import os.path +import inspect +import warnings + +__version__ = '3.11' +__tabversion__ = '3.10' #----------------------------------------------------------------------------- # === User configurable parameters === @@ -56,19 +75,126 @@ # Change these to modify the default behavior of yacc (if you wish) #----------------------------------------------------------------------------- -yaccdebug = 1 # Debugging mode. If set, yacc generates a +yaccdebug = True # Debugging mode. 
If set, yacc generates a # a 'parser.out' file in the current directory debug_file = 'parser.out' # Default name of the debugging file tab_module = 'parsetab' # Default name of the table module -default_lr = 'SLR' # Default LR table generation method +default_lr = 'LALR' # Default LR table generation method error_count = 3 # Number of symbols that must be shifted to leave recovery mode -import re, types, sys, cStringIO, md5, os.path +yaccdevel = False # Set to True if developing yacc. This turns off optimized + # implementations of certain functions. + +resultlimit = 40 # Size limit of results when running in debug mode. + +pickle_protocol = 0 # Protocol to use when writing pickle files + +# String type-checking compatibility +if sys.version_info[0] < 3: + string_types = basestring +else: + string_types = str + +MAXINT = sys.maxsize + +# This object is a stand-in for a logging object created by the +# logging module. PLY will use this by default to create things +# such as the parser.out file. If a user wants more detailed +# information, they can create their own logging object and pass +# it into PLY. + +class PlyLogger(object): + def __init__(self, f): + self.f = f + + def debug(self, msg, *args, **kwargs): + self.f.write((msg % args) + '\n') + + info = debug + + def warning(self, msg, *args, **kwargs): + self.f.write('WARNING: ' + (msg % args) + '\n') + + def error(self, msg, *args, **kwargs): + self.f.write('ERROR: ' + (msg % args) + '\n') + + critical = debug + +# Null logger is used when no output is generated. Does nothing. +class NullLogger(object): + def __getattribute__(self, name): + return self + + def __call__(self, *args, **kwargs): + return self # Exception raised for yacc-related errors -class YaccError(Exception): pass +class YaccError(Exception): + pass + +# Format the result message that the parser produces when running in debug mode. +def format_result(r): + repr_str = repr(r) + if '\n' in repr_str: + repr_str = repr(repr_str) + if len(repr_str) > resultlimit: + repr_str = repr_str[:resultlimit] + ' ...' + result = '<%s @ 0x%x> (%s)' % (type(r).__name__, id(r), repr_str) + return result + +# Format stack entries when the parser is running in debug mode +def format_stack_entry(r): + repr_str = repr(r) + if '\n' in repr_str: + repr_str = repr(repr_str) + if len(repr_str) < 16: + return repr_str + else: + return '<%s @ 0x%x>' % (type(r).__name__, id(r)) + +# Panic mode error recovery support. This feature is being reworked--much of the +# code here is to offer a deprecation/backwards compatible transition + +_errok = None +_token = None +_restart = None +_warnmsg = '''PLY: Don't use global functions errok(), token(), and restart() in p_error(). +Instead, invoke the methods on the associated parser instance: + + def p_error(p): + ... + # Use parser.errok(), parser.token(), parser.restart() + ... 
+ + parser = yacc.yacc() +''' + +def errok(): + warnings.warn(_warnmsg) + return _errok() + +def restart(): + warnings.warn(_warnmsg) + return _restart() + +def token(): + warnings.warn(_warnmsg) + return _token() + +# Utility function to call the p_error() function with some deprecation hacks +def call_errorfunc(errorfunc, token, parser): + global _errok, _token, _restart + _errok = parser.errok + _token = parser.token + _restart = parser.restart + r = errorfunc(token) + try: + del _errok, _token, _restart + except NameError: + pass + return r #----------------------------------------------------------------------------- # === LR Parsing Engine === @@ -84,10 +210,15 @@ class YaccError(Exception): pass # .value = Symbol value # .lineno = Starting line number # .endlineno = Ending line number (optional, set automatically) +# .lexpos = Starting lex position +# .endlexpos = Ending lex position (optional, set automatically) class YaccSymbol: - def __str__(self): return self.type - def __repr__(self): return str(self) + def __str__(self): + return self.type + + def __repr__(self): + return str(self) # This class is a wrapper around the objects actually passed to each # grammar rule. Index lookup and assignment actually assign the @@ -95,146 +226,233 @@ def __repr__(self): return str(self) # The lineno() method returns the line number of a given # item (or 0 if not defined). The linespan() method returns # a tuple of (startline,endline) representing the range of lines -# for a symbol. +# for a symbol. The lexspan() method returns a tuple (lexpos,endlexpos) +# representing the range of positional information for a symbol. class YaccProduction: - def __init__(self,s): + def __init__(self, s, stack=None): self.slice = s - self.pbstack = [] - - def __getitem__(self,n): - return self.slice[n].value + self.stack = stack + self.lexer = None + self.parser = None + + def __getitem__(self, n): + if isinstance(n, slice): + return [s.value for s in self.slice[n]] + elif n >= 0: + return self.slice[n].value + else: + return self.stack[n].value - def __setitem__(self,n,v): + def __setitem__(self, n, v): self.slice[n].value = v + def __getslice__(self, i, j): + return [s.value for s in self.slice[i:j]] + def __len__(self): return len(self.slice) - - def lineno(self,n): - return getattr(self.slice[n],"lineno",0) - - def linespan(self,n): - startline = getattr(self.slice[n],"lineno",0) - endline = getattr(self.slice[n],"endlineno",startline) - return startline,endline - - def pushback(self,n): - if n <= 0: - raise ValueError, "Expected a positive value" - if n > (len(self.slice)-1): - raise ValueError, "Can't push %d tokens. Only %d are available." % (n,len(self.slice)-1) - for i in range(0,n): - self.pbstack.append(self.slice[-i-1]) - -# The LR Parsing engine. This is defined as a class so that multiple parsers -# can exist in the same process. A user never instantiates this directly. -# Instead, the global yacc() function should be used to create a suitable Parser -# object. - -class Parser: - def __init__(self,magic=None): - - # This is a hack to keep users from trying to instantiate a Parser - # object directly. - - if magic != "xyzzy": - raise YaccError, "Can't instantiate Parser. Use yacc() instead." 
- - # Reset internal state - self.productions = None # List of productions - self.errorfunc = None # Error handling function - self.action = { } # LR Action table - self.goto = { } # LR goto table - self.require = { } # Attribute require table - self.method = "Unknown LR" # Table construction method used + + def lineno(self, n): + return getattr(self.slice[n], 'lineno', 0) + + def set_lineno(self, n, lineno): + self.slice[n].lineno = lineno + + def linespan(self, n): + startline = getattr(self.slice[n], 'lineno', 0) + endline = getattr(self.slice[n], 'endlineno', startline) + return startline, endline + + def lexpos(self, n): + return getattr(self.slice[n], 'lexpos', 0) + + def set_lexpos(self, n, lexpos): + self.slice[n].lexpos = lexpos + + def lexspan(self, n): + startpos = getattr(self.slice[n], 'lexpos', 0) + endpos = getattr(self.slice[n], 'endlexpos', startpos) + return startpos, endpos + + def error(self): + raise SyntaxError + +# ----------------------------------------------------------------------------- +# == LRParser == +# +# The LR Parsing engine. +# ----------------------------------------------------------------------------- + +class LRParser: + def __init__(self, lrtab, errorf): + self.productions = lrtab.lr_productions + self.action = lrtab.lr_action + self.goto = lrtab.lr_goto + self.errorfunc = errorf + self.set_defaulted_states() + self.errorok = True def errok(self): - self.errorcount = 0 + self.errorok = True def restart(self): del self.statestack[:] del self.symstack[:] sym = YaccSymbol() - sym.type = '$' + sym.type = '$end' self.symstack.append(sym) self.statestack.append(0) - - def parse(self,input=None,lexer=None,debug=0): - lookahead = None # Current lookahead symbol - lookaheadstack = [ ] # Stack of lookahead symbols - actions = self.action # Local reference to action table - goto = self.goto # Local reference to goto table - prod = self.productions # Local reference to production list - pslice = YaccProduction(None) # Production object passed to grammar rules - pslice.parser = self # Parser object - self.errorcount = 0 # Used during error recovery + + # Defaulted state support. + # This method identifies parser states where there is only one possible reduction action. + # For such states, the parser can make a choose to make a rule reduction without consuming + # the next look-ahead token. This delayed invocation of the tokenizer can be useful in + # certain kinds of advanced parsing situations where the lexer and parser interact with + # each other or change states (i.e., manipulation of scope, lexer states, etc.). + # + # See: http://www.gnu.org/software/bison/manual/html_node/Default-Reductions.html#Default-Reductions + def set_defaulted_states(self): + self.defaulted_states = {} + for state, actions in self.action.items(): + rules = list(actions.values()) + if len(rules) == 1 and rules[0] < 0: + self.defaulted_states[state] = rules[0] + + def disable_defaulted_states(self): + self.defaulted_states = {} + + def parse(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None): + if debug or yaccdevel: + if isinstance(debug, int): + debug = PlyLogger(sys.stderr) + return self.parsedebug(input, lexer, debug, tracking, tokenfunc) + elif tracking: + return self.parseopt(input, lexer, debug, tracking, tokenfunc) + else: + return self.parseopt_notrack(input, lexer, debug, tracking, tokenfunc) + + + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + # parsedebug(). 
+ # + # This is the debugging enabled version of parse(). All changes made to the + # parsing engine should be made here. Optimized versions of this function + # are automatically created by the ply/ygen.py script. This script cuts out + # sections enclosed in markers such as this: + # + # #--! DEBUG + # statements + # #--! DEBUG + # + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + def parsedebug(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None): + #--! parsedebug-start + lookahead = None # Current lookahead symbol + lookaheadstack = [] # Stack of lookahead symbols + actions = self.action # Local reference to action table (to avoid lookup on self.) + goto = self.goto # Local reference to goto table (to avoid lookup on self.) + prod = self.productions # Local reference to production list (to avoid lookup on self.) + defaulted_states = self.defaulted_states # Local reference to defaulted states + pslice = YaccProduction(None) # Production object passed to grammar rules + errorcount = 0 # Used during error recovery + + #--! DEBUG + debug.info('PLY: PARSE DEBUG START') + #--! DEBUG # If no lexer was given, we will try to use the lex module if not lexer: - import lex as lexer + from . import lex + lexer = lex.lexer + # Set up the lexer and parser objects on pslice pslice.lexer = lexer - + pslice.parser = self + # If input was supplied, pass to lexer - if input: + if input is not None: lexer.input(input) - # Tokenize function - get_token = lexer.token + if tokenfunc is None: + # Tokenize function + get_token = lexer.token + else: + get_token = tokenfunc + + # Set the parser() token method (sometimes used in error recovery) + self.token = get_token - statestack = [ ] # Stack of parsing states + # Set up the state and symbol stacks + + statestack = [] # Stack of parsing states self.statestack = statestack - symstack = [ ] # Stack of grammar symbols + symstack = [] # Stack of grammar symbols self.symstack = symstack + pslice.stack = symstack # Put in the production errtoken = None # Err token - # The start state is assumed to be (0,$) + # The start state is assumed to be (0,$end) + statestack.append(0) sym = YaccSymbol() - sym.type = '$' + sym.type = '$end' symstack.append(sym) - - while 1: + state = 0 + while True: # Get the next symbol on the input. If a lookahead symbol # is already set, we just use that. Otherwise, we'll pull # the next token off of the lookaheadstack or from the lexer - if not lookahead: - if not lookaheadstack: - lookahead = get_token() # Get the next token - else: - lookahead = lookaheadstack.pop() + + #--! DEBUG + debug.debug('') + debug.debug('State : %s', state) + #--! DEBUG + + if state not in defaulted_states: if not lookahead: - lookahead = YaccSymbol() - lookahead.type = '$' - if debug: - errorlead = ("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip() + if not lookaheadstack: + lookahead = get_token() # Get the next token + else: + lookahead = lookaheadstack.pop() + if not lookahead: + lookahead = YaccSymbol() + lookahead.type = '$end' + + # Check the action table + ltype = lookahead.type + t = actions[state].get(ltype) + else: + t = defaulted_states[state] + #--! DEBUG + debug.debug('Defaulted state %s: Reduce using %d', state, -t) + #--! DEBUG - # Check the action table - s = statestack[-1] - ltype = lookahead.type - t = actions.get((s,ltype),None) + #--! DEBUG + debug.debug('Stack : %s', + ('%s . 
%s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip()) + #--! DEBUG if t is not None: if t > 0: # shift a symbol on the stack - if ltype == '$': - # Error, end of input - sys.stderr.write("yacc: Parse error. EOF\n") - return statestack.append(t) - if debug > 1: - sys.stderr.write("%-60s shift state %s\n" % (errorlead, t)) + state = t + + #--! DEBUG + debug.debug('Action : Shift and goto state %s', t) + #--! DEBUG + symstack.append(lookahead) lookahead = None # Decrease error count on successful shift - if self.errorcount > 0: - self.errorcount -= 1 - + if errorcount: + errorcount -= 1 continue - + if t < 0: # reduce a symbol on the stack, emit a production p = prod[-t] @@ -245,46 +463,123 @@ def parse(self,input=None,lexer=None,debug=0): sym = YaccSymbol() sym.type = pname # Production name sym.value = None - if debug > 1: - sys.stderr.write("%-60s reduce %d\n" % (errorlead, -t)) + + #--! DEBUG + if plen: + debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str, + '['+','.join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+']', + goto[statestack[-1-plen]][pname]) + else: + debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str, [], + goto[statestack[-1]][pname]) + + #--! DEBUG if plen: targ = symstack[-plen-1:] targ[0] = sym + + #--! TRACKING + if tracking: + t1 = targ[1] + sym.lineno = t1.lineno + sym.lexpos = t1.lexpos + t1 = targ[-1] + sym.endlineno = getattr(t1, 'endlineno', t1.lineno) + sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos) + #--! TRACKING + + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + # The code enclosed in this section is duplicated + # below as a performance optimization. Make sure + # changes get made in both locations. + + pslice.slice = targ + try: - sym.lineno = targ[1].lineno - sym.endlineno = getattr(targ[-1],"endlineno",targ[-1].lineno) - except AttributeError: - sym.lineno = 0 - del symstack[-plen:] - del statestack[-plen:] + # Call the grammar rule with our special slice object + del symstack[-plen:] + self.state = state + p.callable(pslice) + del statestack[-plen:] + #--! DEBUG + debug.info('Result : %s', format_result(pslice[0])) + #--! DEBUG + symstack.append(sym) + state = goto[statestack[-1]][pname] + statestack.append(state) + except SyntaxError: + # If an error was set. Enter error recovery state + lookaheadstack.append(lookahead) # Save the current lookahead token + symstack.extend(targ[1:-1]) # Put the production slice back on the stack + statestack.pop() # Pop back one state (before the reduce) + state = statestack[-1] + sym.type = 'error' + sym.value = 'error' + lookahead = sym + errorcount = error_count + self.errorok = False + + continue + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + else: - sym.lineno = 0 - targ = [ sym ] - pslice.slice = targ - pslice.pbstack = [] - # Call the grammar rule with our special slice object - p.func(pslice) - - # If there was a pushback, put that on the stack - if pslice.pbstack: - lookaheadstack.append(lookahead) - for _t in pslice.pbstack: - lookaheadstack.append(_t) - lookahead = None - symstack.append(sym) - statestack.append(goto[statestack[-1],pname]) - continue + #--! TRACKING + if tracking: + sym.lineno = lexer.lineno + sym.lexpos = lexer.lexpos + #--! TRACKING + + targ = [sym] + + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + # The code enclosed in this section is duplicated + # above as a performance optimization. Make sure + # changes get made in both locations. 
+ + pslice.slice = targ + + try: + # Call the grammar rule with our special slice object + self.state = state + p.callable(pslice) + #--! DEBUG + debug.info('Result : %s', format_result(pslice[0])) + #--! DEBUG + symstack.append(sym) + state = goto[statestack[-1]][pname] + statestack.append(state) + except SyntaxError: + # If an error was set. Enter error recovery state + lookaheadstack.append(lookahead) # Save the current lookahead token + statestack.pop() # Pop back one state (before the reduce) + state = statestack[-1] + sym.type = 'error' + sym.value = 'error' + lookahead = sym + errorcount = error_count + self.errorok = False + + continue + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! if t == 0: n = symstack[-1] - return getattr(n,"value",None) - sys.stderr.write(errorlead, "\n") + result = getattr(n, 'value', None) + #--! DEBUG + debug.info('Done : Returning %s', format_result(result)) + debug.info('PLY: PARSE DEBUG END') + #--! DEBUG + return result + + if t is None: + + #--! DEBUG + debug.error('Error : %s', + ('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip()) + #--! DEBUG - if t == None: - if debug: - sys.stderr.write(errorlead + "\n") # We have some kind of parsing error here. To handle # this, we are going to push the current token onto # the tokenstack and replace it with an 'error' token. @@ -295,20 +590,18 @@ def parse(self,input=None,lexer=None,debug=0): # the user defined p_error() function if this is the # first syntax error. This function is only called if # errorcount == 0. - if not self.errorcount: - self.errorcount = error_count + if errorcount == 0 or self.errorok: + errorcount = error_count + self.errorok = False errtoken = lookahead - if errtoken.type == '$': + if errtoken.type == '$end': errtoken = None # End of file! if self.errorfunc: - global errok,token,restart - errok = self.errok # Set some special functions available in error recovery - token = get_token - restart = self.restart - tok = self.errorfunc(errtoken) - del errok, token, restart # Delete special functions - - if not self.errorcount: + if errtoken and not hasattr(errtoken, 'lexer'): + errtoken.lexer = lexer + self.state = state + tok = call_errorfunc(self.errorfunc, errtoken, self) + if self.errorok: # User must have done some kind of panic # mode recovery on their own. The # returned token is the next lookahead @@ -317,26 +610,29 @@ def parse(self,input=None,lexer=None,debug=0): continue else: if errtoken: - if hasattr(errtoken,"lineno"): lineno = lookahead.lineno - else: lineno = 0 + if hasattr(errtoken, 'lineno'): + lineno = lookahead.lineno + else: + lineno = 0 if lineno: - sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type)) + sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type)) else: - sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type) + sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type) else: - sys.stderr.write("yacc: Parse error in input. EOF\n") + sys.stderr.write('yacc: Parse error in input. EOF\n') return else: - self.errorcount = error_count - + errorcount = error_count + # case 1: the statestack only has 1 entry on it. If we're in this state, the # entire parse has been rolled back and we're completely hosed. The token is # discarded and we just keep going. 
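# A sketch of how a grammar can drive the panic-mode recovery implemented
# above, using the two hooks the engine exposes: parser.token() (the get_token
# reference saved by parse()) and parser.errok() (which resets self.errorok so
# that the token returned here becomes the new lookahead). The module-level
# parser object and the SEMI token type are assumptions for illustration.
import sys

def p_error(tok):
    if tok is None:
        sys.stderr.write('unexpected end of input\n')
        return
    # Discard tokens up to the next statement separator, then declare
    # recovery successful so parsing resumes with that token as lookahead.
    while True:
        nxt = parser.token()
        if nxt is None or nxt.type == 'SEMI':
            break
    parser.errok()
    return nxt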
- if len(statestack) <= 1 and lookahead.type != '$': + if len(statestack) <= 1 and lookahead.type != '$end': lookahead = None errtoken = None + state = 0 # Nuke the pushback stack del lookaheadstack[:] continue @@ -345,2067 +641,2863 @@ def parse(self,input=None,lexer=None,debug=0): # at the end of the file. nuke the top entry and generate an error token # Start nuking entries on the stack - if lookahead.type == '$': + if lookahead.type == '$end': # Whoa. We're really hosed here. Bail out - return + return if lookahead.type != 'error': sym = symstack[-1] if sym.type == 'error': # Hmmm. Error is on top of stack, we'll just nuke input # symbol and continue + #--! TRACKING + if tracking: + sym.endlineno = getattr(lookahead, 'lineno', sym.lineno) + sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos) + #--! TRACKING lookahead = None continue + + # Create the error symbol for the first time and make it the new lookahead symbol t = YaccSymbol() t.type = 'error' - if hasattr(lookahead,"lineno"): - t.lineno = lookahead.lineno + + if hasattr(lookahead, 'lineno'): + t.lineno = t.endlineno = lookahead.lineno + if hasattr(lookahead, 'lexpos'): + t.lexpos = t.endlexpos = lookahead.lexpos t.value = lookahead lookaheadstack.append(lookahead) lookahead = t else: - symstack.pop() + sym = symstack.pop() + #--! TRACKING + if tracking: + lookahead.lineno = sym.lineno + lookahead.lexpos = sym.lexpos + #--! TRACKING statestack.pop() + state = statestack[-1] continue # Call an error function here - raise RuntimeError, "yacc: internal parser error!!!\n" - -# ----------------------------------------------------------------------------- -# === Parser Construction === -# -# The following functions and variables are used to implement the yacc() function -# itself. This is pretty hairy stuff involving lots of error checking, -# construction of LR items, kernels, and so forth. Although a lot of -# this work is done using global variables, the resulting Parser object -# is completely self contained--meaning that it is safe to repeatedly -# call yacc() with different grammars in the same application. -# ----------------------------------------------------------------------------- - -# ----------------------------------------------------------------------------- -# validate_file() -# -# This function checks to see if there are duplicated p_rulename() functions -# in the parser module file. Without this function, it is really easy for -# users to make mistakes by cutting and pasting code fragments (and it's a real -# bugger to try and figure out why the resulting parser doesn't work). Therefore, -# we just do a little regular expression pattern matching of def statements -# to try and detect duplicates. -# ----------------------------------------------------------------------------- - -def validate_file(filename): - base,ext = os.path.splitext(filename) - if ext != '.py': return 1 # No idea. Assume it's okay. - - try: - f = open(filename) - lines = f.readlines() - f.close() - except IOError: - return 1 # Oh well - - # Match def p_funcname( - fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(') - counthash = { } - linen = 1 - noerror = 1 - for l in lines: - m = fre.match(l) - if m: - name = m.group(1) - prev = counthash.get(name) - if not prev: - counthash[name] = linen - else: - sys.stderr.write("%s:%d: Function %s redefined. 
Previously defined on line %d\n" % (filename,linen,name,prev)) - noerror = 0 - linen += 1 - return noerror - -# This function looks for functions that might be grammar rules, but which don't have the proper p_suffix. -def validate_dict(d): - for n,v in d.items(): - if n[0:2] == 'p_' and type(v) in (types.FunctionType, types.MethodType): continue - if n[0:2] == 't_': continue - - if n[0:2] == 'p_': - sys.stderr.write("yacc: Warning. '%s' not defined as a function\n" % n) - if 1 and isinstance(v,types.FunctionType) and v.func_code.co_argcount == 1: - try: - doc = v.__doc__.split(" ") - if doc[1] == ':': - sys.stderr.write("%s:%d: Warning. Possible grammar rule '%s' defined without p_ prefix.\n" % (v.func_code.co_filename, v.func_code.co_firstlineno,n)) - except StandardError: - pass + raise RuntimeError('yacc: internal parser error!!!\n') + + #--! parsedebug-end + + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + # parseopt(). + # + # Optimized version of parse() method. DO NOT EDIT THIS CODE DIRECTLY! + # This code is automatically generated by the ply/ygen.py script. Make + # changes to the parsedebug() method instead. + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + def parseopt(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None): + #--! parseopt-start + lookahead = None # Current lookahead symbol + lookaheadstack = [] # Stack of lookahead symbols + actions = self.action # Local reference to action table (to avoid lookup on self.) + goto = self.goto # Local reference to goto table (to avoid lookup on self.) + prod = self.productions # Local reference to production list (to avoid lookup on self.) + defaulted_states = self.defaulted_states # Local reference to defaulted states + pslice = YaccProduction(None) # Production object passed to grammar rules + errorcount = 0 # Used during error recovery -# ----------------------------------------------------------------------------- -# === GRAMMAR FUNCTIONS === -# -# The following global variables and functions are used to store, manipulate, -# and verify the grammar rules specified by the user. -# ----------------------------------------------------------------------------- -# Initialize all of the global variables used during grammar construction -def initialize_vars(): - global Productions, Prodnames, Prodmap, Terminals - global Nonterminals, First, Follow, Precedence, LRitems - global Errorfunc, Signature, Requires + # If no lexer was given, we will try to use the lex module + if not lexer: + from . import lex + lexer = lex.lexer - # LALR(1) globals - global Prodempty, TReductions, NTReductions, GotoSetNum, Canonical - - Productions = [None] # A list of all of the productions. The first - # entry is always reserved for the purpose of - # building an augmented grammar - - Prodnames = { } # A dictionary mapping the names of nonterminals to a list of all - # productions of that nonterminal. - - Prodmap = { } # A dictionary that is only used to detect duplicate - # productions. + # Set up the lexer and parser objects on pslice + pslice.lexer = lexer + pslice.parser = self - Terminals = { } # A dictionary mapping the names of terminal symbols to a - # list of the rules where they are used. + # If input was supplied, pass to lexer + if input is not None: + lexer.input(input) - Nonterminals = { } # A dictionary mapping names of nonterminals to a list - # of rule numbers where they are used. 
+ if tokenfunc is None: + # Tokenize function + get_token = lexer.token + else: + get_token = tokenfunc - First = { } # A dictionary of precomputed FIRST(x) symbols - - Follow = { } # A dictionary of precomputed FOLLOW(x) symbols + # Set the parser() token method (sometimes used in error recovery) + self.token = get_token - Precedence = { } # Precedence rules for each terminal. Contains tuples of the - # form ('right',level) or ('nonassoc', level) or ('left',level) + # Set up the state and symbol stacks - LRitems = [ ] # A list of all LR items for the grammar. These are the - # productions with the "dot" like E -> E . PLUS E + statestack = [] # Stack of parsing states + self.statestack = statestack + symstack = [] # Stack of grammar symbols + self.symstack = symstack - Errorfunc = None # User defined error handler + pslice.stack = symstack # Put in the production + errtoken = None # Err token - Signature = md5.new() # Digital signature of the grammar rules, precedence - # and other information. Used to determined when a - # parsing table needs to be regenerated. + # The start state is assumed to be (0,$end) - Requires = { } # Requires list + statestack.append(0) + sym = YaccSymbol() + sym.type = '$end' + symstack.append(sym) + state = 0 + while True: + # Get the next symbol on the input. If a lookahead symbol + # is already set, we just use that. Otherwise, we'll pull + # the next token off of the lookaheadstack or from the lexer - # LALR(1) Initialization - Prodempty = { } # A dictionary of all productions that have an empty rule - # of the form P : - TReductions = { } # A dictionary of precomputer reductions from - # nonterminals to terminals + if state not in defaulted_states: + if not lookahead: + if not lookaheadstack: + lookahead = get_token() # Get the next token + else: + lookahead = lookaheadstack.pop() + if not lookahead: + lookahead = YaccSymbol() + lookahead.type = '$end' + + # Check the action table + ltype = lookahead.type + t = actions[state].get(ltype) + else: + t = defaulted_states[state] - NTReductions = { } # A dictionary of precomputed reductions from - # nonterminals to nonterminals - GotoSetNum = { } # A dictionary that remembers goto sets based on - # the state number and symbol + if t is not None: + if t > 0: + # shift a symbol on the stack + statestack.append(t) + state = t - Canonical = { } # A list of LR item sets. A LR item set is a list of LR - # items that represent the state of the parser - # File objects used when creating the parser.out debugging file - global _vf, _vfc - _vf = cStringIO.StringIO() - _vfc = cStringIO.StringIO() + symstack.append(lookahead) + lookahead = None -# ----------------------------------------------------------------------------- -# class Production: -# -# This class stores the raw information about a single production or grammar rule. -# It has a few required attributes: -# -# name - Name of the production (nonterminal) -# prod - A list of symbols making up its production -# number - Production number. -# -# In addition, a few additional attributes are used to help with debugging or -# optimization of table generation. -# -# file - File where production action is defined. -# lineno - Line number where action is defined -# func - Action function -# prec - Precedence level -# lr_next - Next LR item. Example, if we are ' E -> E . PLUS E' -# then lr_next refers to 'E -> E PLUS . E' -# lr_index - LR item index (location of the ".") in the prod list. 
-# lookaheads - LALR lookahead symbols for this item -# len - Length of the production (number of symbols on right hand side) -# ----------------------------------------------------------------------------- + # Decrease error count on successful shift + if errorcount: + errorcount -= 1 + continue -class Production: - def __init__(self,**kw): - for k,v in kw.items(): - setattr(self,k,v) - self.lr_index = -1 - self.lr0_added = 0 # Flag indicating whether or not added to LR0 closure - self.lr1_added = 0 # Flag indicating whether or not added to LR1 - self.usyms = [ ] - self.lookaheads = { } - self.lk_added = { } - self.setnumbers = [ ] - - def __str__(self): - if self.prod: - s = "%s -> %s" % (self.name," ".join(self.prod)) - else: - s = "%s -> " % self.name - return s + if t < 0: + # reduce a symbol on the stack, emit a production + p = prod[-t] + pname = p.name + plen = p.len - def __repr__(self): - return str(self) + # Get production function + sym = YaccSymbol() + sym.type = pname # Production name + sym.value = None - # Compute lr_items from the production - def lr_item(self,n): - if n > len(self.prod): return None - p = Production() - p.name = self.name - p.prod = list(self.prod) - p.number = self.number - p.lr_index = n - p.lookaheads = { } - p.setnumbers = self.setnumbers - p.prod.insert(n,".") - p.prod = tuple(p.prod) - p.len = len(p.prod) - p.usyms = self.usyms - - # Precompute list of productions immediately following - try: - p.lrafter = Prodnames[p.prod[n+1]] - except (IndexError,KeyError),e: - p.lrafter = [] - try: - p.lrbefore = p.prod[n-1] - except IndexError: - p.lrbefore = None - return p + if plen: + targ = symstack[-plen-1:] + targ[0] = sym -class MiniProduction: - pass + #--! TRACKING + if tracking: + t1 = targ[1] + sym.lineno = t1.lineno + sym.lexpos = t1.lexpos + t1 = targ[-1] + sym.endlineno = getattr(t1, 'endlineno', t1.lineno) + sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos) + #--! TRACKING -# Utility function -def is_identifier(s): - for c in s: - if not (c.isalnum() or c == '_'): return 0 - return 1 + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + # The code enclosed in this section is duplicated + # below as a performance optimization. Make sure + # changes get made in both locations. -# ----------------------------------------------------------------------------- -# add_production() -# -# Given an action function, this function assembles a production rule. -# The production rule is assumed to be found in the function's docstring. -# This rule has the general syntax: -# -# name1 ::= production1 -# | production2 -# | production3 -# ... -# | productionn -# name2 ::= production1 -# | production2 -# ... -# ----------------------------------------------------------------------------- + pslice.slice = targ -def add_production(f,file,line,prodname,syms): - - if Terminals.has_key(prodname): - sys.stderr.write("%s:%d: Illegal rule name '%s'. Already defined as a token.\n" % (file,line,prodname)) - return -1 - if prodname == 'error': - sys.stderr.write("%s:%d: Illegal rule name '%s'. 
error is a reserved word.\n" % (file,line,prodname)) - return -1 - - if not is_identifier(prodname): - sys.stderr.write("%s:%d: Illegal rule name '%s'\n" % (file,line,prodname)) - return -1 - - for s in syms: - if not is_identifier(s) and s != '%prec': - sys.stderr.write("%s:%d: Illegal name '%s' in rule '%s'\n" % (file,line,s, prodname)) - return -1 - - # See if the rule is already in the rulemap - map = "%s -> %s" % (prodname,syms) - if Prodmap.has_key(map): - m = Prodmap[map] - sys.stderr.write("%s:%d: Duplicate rule %s.\n" % (file,line, m)) - sys.stderr.write("%s:%d: Previous definition at %s:%d\n" % (file,line, m.file, m.line)) - return -1 - - p = Production() - p.name = prodname - p.prod = syms - p.file = file - p.line = line - p.func = f - p.number = len(Productions) - - - Productions.append(p) - Prodmap[map] = p - if not Nonterminals.has_key(prodname): - Nonterminals[prodname] = [ ] - - # Add all terminals to Terminals - i = 0 - while i < len(p.prod): - t = p.prod[i] - if t == '%prec': - try: - precname = p.prod[i+1] - except IndexError: - sys.stderr.write("%s:%d: Syntax error. Nothing follows %%prec.\n" % (p.file,p.line)) - return -1 - - prec = Precedence.get(precname,None) - if not prec: - sys.stderr.write("%s:%d: Nothing known about the precedence of '%s'\n" % (p.file,p.line,precname)) - return -1 - else: - p.prec = prec - del p.prod[i] - del p.prod[i] - continue + try: + # Call the grammar rule with our special slice object + del symstack[-plen:] + self.state = state + p.callable(pslice) + del statestack[-plen:] + symstack.append(sym) + state = goto[statestack[-1]][pname] + statestack.append(state) + except SyntaxError: + # If an error was set. Enter error recovery state + lookaheadstack.append(lookahead) # Save the current lookahead token + symstack.extend(targ[1:-1]) # Put the production slice back on the stack + statestack.pop() # Pop back one state (before the reduce) + state = statestack[-1] + sym.type = 'error' + sym.value = 'error' + lookahead = sym + errorcount = error_count + self.errorok = False - if Terminals.has_key(t): - Terminals[t].append(p.number) - # Is a terminal. We'll assign a precedence to p based on this - if not hasattr(p,"prec"): - p.prec = Precedence.get(t,('right',0)) - else: - if not Nonterminals.has_key(t): - Nonterminals[t] = [ ] - Nonterminals[t].append(p.number) - i += 1 - - if not hasattr(p,"prec"): - p.prec = ('right',0) - - # Set final length of productions - p.len = len(p.prod) - p.prod = tuple(p.prod) - - # Calculate unique syms in the production - p.usyms = [ ] - for s in p.prod: - if s not in p.usyms: - p.usyms.append(s) - - # Add to the global productions list - try: - Prodnames[p.name].append(p) - except KeyError: - Prodnames[p.name] = [ p ] - return 0 + continue + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -# Given a raw rule function, this function rips out its doc string -# and adds rules to the grammar + else: -def add_function(f): - line = f.func_code.co_firstlineno - file = f.func_code.co_filename - error = 0 + #--! TRACKING + if tracking: + sym.lineno = lexer.lineno + sym.lexpos = lexer.lexpos + #--! 
TRACKING - if isinstance(f,types.MethodType): - reqdargs = 2 - else: - reqdargs = 1 - - if f.func_code.co_argcount > reqdargs: - sys.stderr.write("%s:%d: Rule '%s' has too many arguments.\n" % (file,line,f.__name__)) - return -1 - - if f.func_code.co_argcount < reqdargs: - sys.stderr.write("%s:%d: Rule '%s' requires an argument.\n" % (file,line,f.__name__)) - return -1 - - if f.__doc__: - # Split the doc string into lines - pstrings = f.__doc__.splitlines() - lastp = None - dline = line - for ps in pstrings: - dline += 1 - p = ps.split() - if not p: continue - try: - if p[0] == '|': - # This is a continuation of a previous rule - if not lastp: - sys.stderr.write("%s:%d: Misplaced '|'.\n" % (file,dline)) - return -1 - prodname = lastp - if len(p) > 1: - syms = p[1:] - else: - syms = [ ] - else: - prodname = p[0] - lastp = prodname - assign = p[1] - if len(p) > 2: - syms = p[2:] - else: - syms = [ ] - if assign != ':' and assign != '::=': - sys.stderr.write("%s:%d: Syntax error. Expected ':'\n" % (file,dline)) - return -1 - e = add_production(f,file,dline,prodname,syms) - error += e - except StandardError: - sys.stderr.write("%s:%d: Syntax error in rule '%s'\n" % (file,dline,ps)) - error -= 1 - else: - sys.stderr.write("%s:%d: No documentation string specified in function '%s'\n" % (file,line,f.__name__)) - return error - - -# Cycle checking code (Michael Dyck) - -def compute_reachable(): - ''' - Find each symbol that can be reached from the start symbol. - Print a warning for any nonterminals that can't be reached. - (Unused terminals have already had their warning.) - ''' - Reachable = { } - for s in Terminals.keys() + Nonterminals.keys(): - Reachable[s] = 0 - - mark_reachable_from( Productions[0].prod[0], Reachable ) - - for s in Nonterminals.keys(): - if not Reachable[s]: - sys.stderr.write("yacc: Symbol '%s' is unreachable.\n" % s) - -def mark_reachable_from(s, Reachable): - ''' - Mark all symbols that are reachable from symbol s. - ''' - if Reachable[s]: - # We've already reached symbol s. - return - Reachable[s] = 1 - for p in Prodnames.get(s,[]): - for r in p.prod: - mark_reachable_from(r, Reachable) + targ = [sym] -# ----------------------------------------------------------------------------- -# compute_terminates() -# -# This function looks at the various parsing rules and tries to detect -# infinite recursion cycles (grammar rules where there is no possible way -# to derive a string of only terminals). -# ----------------------------------------------------------------------------- -def compute_terminates(): - ''' - Raise an error for any symbols that don't terminate. - ''' - Terminates = {} - - # Terminals: - for t in Terminals.keys(): - Terminates[t] = 1 - - Terminates['$'] = 1 - - # Nonterminals: - - # Initialize to false: - for n in Nonterminals.keys(): - Terminates[n] = 0 - - # Then propagate termination until no change: - while 1: - some_change = 0 - for (n,pl) in Prodnames.items(): - # Nonterminal n terminates iff any of its productions terminates. - for p in pl: - # Production p terminates iff all of its rhs symbols terminate. - for s in p.prod: - if not Terminates[s]: - # The symbol s does not terminate, - # so production p does not terminate. - p_terminates = 0 - break - else: - # didn't break from the loop, - # so every symbol s terminates - # so production p terminates. - p_terminates = 1 - - if p_terminates: - # symbol n terminates! - if not Terminates[n]: - Terminates[n] = 1 - some_change = 1 - # Don't need to consider any more productions for this n. 
- break + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + # The code enclosed in this section is duplicated + # above as a performance optimization. Make sure + # changes get made in both locations. - if not some_change: - break + pslice.slice = targ - some_error = 0 - for (s,terminates) in Terminates.items(): - if not terminates: - if not Prodnames.has_key(s) and not Terminals.has_key(s) and s != 'error': - # s is used-but-not-defined, and we've already warned of that, - # so it would be overkill to say that it's also non-terminating. - pass - else: - sys.stderr.write("yacc: Infinite recursion detected for symbol '%s'.\n" % s) - some_error = 1 + try: + # Call the grammar rule with our special slice object + self.state = state + p.callable(pslice) + symstack.append(sym) + state = goto[statestack[-1]][pname] + statestack.append(state) + except SyntaxError: + # If an error was set. Enter error recovery state + lookaheadstack.append(lookahead) # Save the current lookahead token + statestack.pop() # Pop back one state (before the reduce) + state = statestack[-1] + sym.type = 'error' + sym.value = 'error' + lookahead = sym + errorcount = error_count + self.errorok = False - return some_error + continue + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -# ----------------------------------------------------------------------------- -# verify_productions() -# -# This function examines all of the supplied rules to see if they seem valid. -# ----------------------------------------------------------------------------- -def verify_productions(cycle_check=1): - error = 0 - for p in Productions: - if not p: continue - - for s in p.prod: - if not Prodnames.has_key(s) and not Terminals.has_key(s) and s != 'error': - sys.stderr.write("%s:%d: Symbol '%s' used, but not defined as a token or a rule.\n" % (p.file,p.line,s)) - error = 1 - continue + if t == 0: + n = symstack[-1] + result = getattr(n, 'value', None) + return result - unused_tok = 0 - # Now verify all of the tokens - if yaccdebug: - _vf.write("Unused terminals:\n\n") - for s,v in Terminals.items(): - if s != 'error' and not v: - sys.stderr.write("yacc: Warning. Token '%s' defined, but not used.\n" % s) - if yaccdebug: _vf.write(" %s\n"% s) - unused_tok += 1 - - # Print out all of the productions - if yaccdebug: - _vf.write("\nGrammar\n\n") - for i in range(1,len(Productions)): - _vf.write("Rule %-5d %s\n" % (i, Productions[i])) - - unused_prod = 0 - # Verify the use of all productions - for s,v in Nonterminals.items(): - if not v: - p = Prodnames[s][0] - sys.stderr.write("%s:%d: Warning. Rule '%s' defined, but not used.\n" % (p.file,p.line, s)) - unused_prod += 1 - - - if unused_tok == 1: - sys.stderr.write("yacc: Warning. There is 1 unused token.\n") - if unused_tok > 1: - sys.stderr.write("yacc: Warning. There are %d unused tokens.\n" % unused_tok) - - if unused_prod == 1: - sys.stderr.write("yacc: Warning. There is 1 unused rule.\n") - if unused_prod > 1: - sys.stderr.write("yacc: Warning. 
There are %d unused rules.\n" % unused_prod) - - if yaccdebug: - _vf.write("\nTerminals, with rules where they appear\n\n") - ks = Terminals.keys() - ks.sort() - for k in ks: - _vf.write("%-20s : %s\n" % (k, " ".join([str(s) for s in Terminals[k]]))) - _vf.write("\nNonterminals, with rules where they appear\n\n") - ks = Nonterminals.keys() - ks.sort() - for k in ks: - _vf.write("%-20s : %s\n" % (k, " ".join([str(s) for s in Nonterminals[k]]))) - - if (cycle_check): - compute_reachable() - error += compute_terminates() -# error += check_cycles() - return error + if t is None: -# ----------------------------------------------------------------------------- -# build_lritems() -# -# This function walks the list of productions and builds a complete set of the -# LR items. The LR items are stored in two ways: First, they are uniquely -# numbered and placed in the list _lritems. Second, a linked list of LR items -# is built for each production. For example: -# -# E -> E PLUS E -# -# Creates the list -# -# [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ] -# ----------------------------------------------------------------------------- -def build_lritems(): - for p in Productions: - lastlri = p - lri = p.lr_item(0) - i = 0 - while 1: - lri = p.lr_item(i) - lastlri.lr_next = lri - if not lri: break - lri.lr_num = len(LRitems) - LRitems.append(lri) - lastlri = lri - i += 1 + # We have some kind of parsing error here. To handle + # this, we are going to push the current token onto + # the tokenstack and replace it with an 'error' token. + # If there are any synchronization rules, they may + # catch it. + # + # In addition to pushing the error token, we call + # the user defined p_error() function if this is the + # first syntax error. This function is only called if + # errorcount == 0. + if errorcount == 0 or self.errorok: + errorcount = error_count + self.errorok = False + errtoken = lookahead + if errtoken.type == '$end': + errtoken = None # End of file! + if self.errorfunc: + if errtoken and not hasattr(errtoken, 'lexer'): + errtoken.lexer = lexer + self.state = state + tok = call_errorfunc(self.errorfunc, errtoken, self) + if self.errorok: + # User must have done some kind of panic + # mode recovery on their own. The + # returned token is the next lookahead + lookahead = tok + errtoken = None + continue + else: + if errtoken: + if hasattr(errtoken, 'lineno'): + lineno = lookahead.lineno + else: + lineno = 0 + if lineno: + sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type)) + else: + sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type) + else: + sys.stderr.write('yacc: Parse error in input. EOF\n') + return - # In order for the rest of the parser generator to work, we need to - # guarantee that no more lritems are generated. Therefore, we nuke - # the p.lr_item method. (Only used in debugging) - # Production.lr_item = None + else: + errorcount = error_count -# ----------------------------------------------------------------------------- -# add_precedence() -# -# Given a list of precedence rules, add to the precedence table. -# ----------------------------------------------------------------------------- + # case 1: the statestack only has 1 entry on it. If we're in this state, the + # entire parse has been rolled back and we're completely hosed. The token is + # discarded and we just keep going. 
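# The LR-item walk that the removed build_lritems() above performed (and that
# the new LRItem class later in this patch models through its lr_index
# attribute) is easy to reproduce by hand: inserting the dot at every position
# of E -> E PLUS E yields exactly the four items listed in the old comment.

prod = ('E', 'PLUS', 'E')
items = []
for n in range(len(prod) + 1):
    dotted = list(prod)
    dotted.insert(n, '.')
    items.append('E -> ' + ' '.join(dotted))

assert items == ['E -> . E PLUS E', 'E -> E . PLUS E',
                 'E -> E PLUS . E', 'E -> E PLUS E .']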
-def add_precedence(plist): - plevel = 0 - error = 0 - for p in plist: - plevel += 1 - try: - prec = p[0] - terms = p[1:] - if prec != 'left' and prec != 'right' and prec != 'nonassoc': - sys.stderr.write("yacc: Invalid precedence '%s'\n" % prec) - return -1 - for t in terms: - if Precedence.has_key(t): - sys.stderr.write("yacc: Precedence already specified for terminal '%s'\n" % t) - error += 1 + if len(statestack) <= 1 and lookahead.type != '$end': + lookahead = None + errtoken = None + state = 0 + # Nuke the pushback stack + del lookaheadstack[:] continue - Precedence[t] = (prec,plevel) - except: - sys.stderr.write("yacc: Invalid precedence table.\n") - error += 1 - return error + # case 2: the statestack has a couple of entries on it, but we're + # at the end of the file. nuke the top entry and generate an error token -# ----------------------------------------------------------------------------- -# augment_grammar() -# -# Compute the augmented grammar. This is just a rule S' -> start where start -# is the starting symbol. -# ----------------------------------------------------------------------------- + # Start nuking entries on the stack + if lookahead.type == '$end': + # Whoa. We're really hosed here. Bail out + return -def augment_grammar(start=None): - if not start: - start = Productions[1].name - Productions[0] = Production(name="S'",prod=[start],number=0,len=1,prec=('right',0),func=None) - Productions[0].usyms = [ start ] - Nonterminals[start].append(0) + if lookahead.type != 'error': + sym = symstack[-1] + if sym.type == 'error': + # Hmmm. Error is on top of stack, we'll just nuke input + # symbol and continue + #--! TRACKING + if tracking: + sym.endlineno = getattr(lookahead, 'lineno', sym.lineno) + sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos) + #--! TRACKING + lookahead = None + continue + # Create the error symbol for the first time and make it the new lookahead symbol + t = YaccSymbol() + t.type = 'error' -# ------------------------------------------------------------------------- -# first() -# -# Compute the value of FIRST1(beta) where beta is a tuple of symbols. -# -# During execution of compute_first1, the result may be incomplete. -# Afterward (e.g., when called from compute_follow()), it will be complete. -# ------------------------------------------------------------------------- -def first(beta): - - # We are computing First(x1,x2,x3,...,xn) - result = [ ] - for x in beta: - x_produces_empty = 0 - - # Add all the non- symbols of First[x] to the result. - for f in First[x]: - if f == '': - x_produces_empty = 1 - else: - if f not in result: result.append(f) + if hasattr(lookahead, 'lineno'): + t.lineno = t.endlineno = lookahead.lineno + if hasattr(lookahead, 'lexpos'): + t.lexpos = t.endlexpos = lookahead.lexpos + t.value = lookahead + lookaheadstack.append(lookahead) + lookahead = t + else: + sym = symstack.pop() + #--! TRACKING + if tracking: + lookahead.lineno = sym.lineno + lookahead.lexpos = sym.lexpos + #--! TRACKING + statestack.pop() + state = statestack[-1] - if x_produces_empty: - # We have to consider the next x in beta, - # i.e. stay in the loop. - pass + continue + + # Call an error function here + raise RuntimeError('yacc: internal parser error!!!\n') + + #--! parseopt-end + + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + # parseopt_notrack(). + # + # Optimized version of parseopt() with line number tracking removed. + # DO NOT EDIT THIS CODE DIRECTLY. 
This code is automatically generated + # by the ply/ygen.py script. Make changes to the parsedebug() method instead. + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + def parseopt_notrack(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None): + #--! parseopt-notrack-start + lookahead = None # Current lookahead symbol + lookaheadstack = [] # Stack of lookahead symbols + actions = self.action # Local reference to action table (to avoid lookup on self.) + goto = self.goto # Local reference to goto table (to avoid lookup on self.) + prod = self.productions # Local reference to production list (to avoid lookup on self.) + defaulted_states = self.defaulted_states # Local reference to defaulted states + pslice = YaccProduction(None) # Production object passed to grammar rules + errorcount = 0 # Used during error recovery + + + # If no lexer was given, we will try to use the lex module + if not lexer: + from . import lex + lexer = lex.lexer + + # Set up the lexer and parser objects on pslice + pslice.lexer = lexer + pslice.parser = self + + # If input was supplied, pass to lexer + if input is not None: + lexer.input(input) + + if tokenfunc is None: + # Tokenize function + get_token = lexer.token else: - # We don't have to consider any further symbols in beta. - break - else: - # There was no 'break' from the loop, - # so x_produces_empty was true for all x in beta, - # so beta produces empty as well. - result.append('') + get_token = tokenfunc - return result + # Set the parser() token method (sometimes used in error recovery) + self.token = get_token + + # Set up the state and symbol stacks + + statestack = [] # Stack of parsing states + self.statestack = statestack + symstack = [] # Stack of grammar symbols + self.symstack = symstack + + pslice.stack = symstack # Put in the production + errtoken = None # Err token + + # The start state is assumed to be (0,$end) + + statestack.append(0) + sym = YaccSymbol() + sym.type = '$end' + symstack.append(sym) + state = 0 + while True: + # Get the next symbol on the input. If a lookahead symbol + # is already set, we just use that. Otherwise, we'll pull + # the next token off of the lookaheadstack or from the lexer + + + if state not in defaulted_states: + if not lookahead: + if not lookaheadstack: + lookahead = get_token() # Get the next token + else: + lookahead = lookaheadstack.pop() + if not lookahead: + lookahead = YaccSymbol() + lookahead.type = '$end' + + # Check the action table + ltype = lookahead.type + t = actions[state].get(ltype) + else: + t = defaulted_states[state] + + + if t is not None: + if t > 0: + # shift a symbol on the stack + statestack.append(t) + state = t + + + symstack.append(lookahead) + lookahead = None + + # Decrease error count on successful shift + if errorcount: + errorcount -= 1 + continue + + if t < 0: + # reduce a symbol on the stack, emit a production + p = prod[-t] + pname = p.name + plen = p.len + + # Get production function + sym = YaccSymbol() + sym.type = pname # Production name + sym.value = None + + + if plen: + targ = symstack[-plen-1:] + targ[0] = sym + + + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + # The code enclosed in this section is duplicated + # below as a performance optimization. Make sure + # changes get made in both locations. 
+ + pslice.slice = targ + + try: + # Call the grammar rule with our special slice object + del symstack[-plen:] + self.state = state + p.callable(pslice) + del statestack[-plen:] + symstack.append(sym) + state = goto[statestack[-1]][pname] + statestack.append(state) + except SyntaxError: + # If an error was set. Enter error recovery state + lookaheadstack.append(lookahead) # Save the current lookahead token + symstack.extend(targ[1:-1]) # Put the production slice back on the stack + statestack.pop() # Pop back one state (before the reduce) + state = statestack[-1] + sym.type = 'error' + sym.value = 'error' + lookahead = sym + errorcount = error_count + self.errorok = False + + continue + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + else: + + + targ = [sym] + + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + # The code enclosed in this section is duplicated + # above as a performance optimization. Make sure + # changes get made in both locations. + + pslice.slice = targ + + try: + # Call the grammar rule with our special slice object + self.state = state + p.callable(pslice) + symstack.append(sym) + state = goto[statestack[-1]][pname] + statestack.append(state) + except SyntaxError: + # If an error was set. Enter error recovery state + lookaheadstack.append(lookahead) # Save the current lookahead token + statestack.pop() # Pop back one state (before the reduce) + state = statestack[-1] + sym.type = 'error' + sym.value = 'error' + lookahead = sym + errorcount = error_count + self.errorok = False + + continue + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + if t == 0: + n = symstack[-1] + result = getattr(n, 'value', None) + return result + + if t is None: + + + # We have some kind of parsing error here. To handle + # this, we are going to push the current token onto + # the tokenstack and replace it with an 'error' token. + # If there are any synchronization rules, they may + # catch it. + # + # In addition to pushing the error token, we call + # the user defined p_error() function if this is the + # first syntax error. This function is only called if + # errorcount == 0. + if errorcount == 0 or self.errorok: + errorcount = error_count + self.errorok = False + errtoken = lookahead + if errtoken.type == '$end': + errtoken = None # End of file! + if self.errorfunc: + if errtoken and not hasattr(errtoken, 'lexer'): + errtoken.lexer = lexer + self.state = state + tok = call_errorfunc(self.errorfunc, errtoken, self) + if self.errorok: + # User must have done some kind of panic + # mode recovery on their own. The + # returned token is the next lookahead + lookahead = tok + errtoken = None + continue + else: + if errtoken: + if hasattr(errtoken, 'lineno'): + lineno = lookahead.lineno + else: + lineno = 0 + if lineno: + sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type)) + else: + sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type) + else: + sys.stderr.write('yacc: Parse error in input. EOF\n') + return + + else: + errorcount = error_count + + # case 1: the statestack only has 1 entry on it. If we're in this state, the + # entire parse has been rolled back and we're completely hosed. The token is + # discarded and we just keep going. + + if len(statestack) <= 1 and lookahead.type != '$end': + lookahead = None + errtoken = None + state = 0 + # Nuke the pushback stack + del lookaheadstack[:] + continue + # case 2: the statestack has a couple of entries on it, but we're + # at the end of the file. 
nuke the top entry and generate an error token + + # Start nuking entries on the stack + if lookahead.type == '$end': + # Whoa. We're really hosed here. Bail out + return -# FOLLOW(x) -# Given a non-terminal. This function computes the set of all symbols -# that might follow it. Dragon book, p. 189. - -def compute_follow(start=None): - # Add '$' to the follow list of the start symbol - for k in Nonterminals.keys(): - Follow[k] = [ ] - - if not start: - start = Productions[1].name - - Follow[start] = [ '$' ] - - while 1: - didadd = 0 - for p in Productions[1:]: - # Here is the production set - for i in range(len(p.prod)): - B = p.prod[i] - if Nonterminals.has_key(B): - # Okay. We got a non-terminal in a production - fst = first(p.prod[i+1:]) - hasempty = 0 - for f in fst: - if f != '' and f not in Follow[B]: - Follow[B].append(f) - didadd = 1 - if f == '': - hasempty = 1 - if hasempty or i == (len(p.prod)-1): - # Add elements of follow(a) to follow(b) - for f in Follow[p.name]: - if f not in Follow[B]: - Follow[B].append(f) - didadd = 1 - if not didadd: break - - if 0 and yaccdebug: - _vf.write('\nFollow:\n') - for k in Nonterminals.keys(): - _vf.write("%-20s : %s\n" % (k, " ".join([str(s) for s in Follow[k]]))) - -# ------------------------------------------------------------------------- -# compute_first1() + if lookahead.type != 'error': + sym = symstack[-1] + if sym.type == 'error': + # Hmmm. Error is on top of stack, we'll just nuke input + # symbol and continue + lookahead = None + continue + + # Create the error symbol for the first time and make it the new lookahead symbol + t = YaccSymbol() + t.type = 'error' + + if hasattr(lookahead, 'lineno'): + t.lineno = t.endlineno = lookahead.lineno + if hasattr(lookahead, 'lexpos'): + t.lexpos = t.endlexpos = lookahead.lexpos + t.value = lookahead + lookaheadstack.append(lookahead) + lookahead = t + else: + sym = symstack.pop() + statestack.pop() + state = statestack[-1] + + continue + + # Call an error function here + raise RuntimeError('yacc: internal parser error!!!\n') + + #--! parseopt-notrack-end + +# ----------------------------------------------------------------------------- +# === Grammar Representation === # -# Compute the value of FIRST1(X) for all symbols -# ------------------------------------------------------------------------- -def compute_first1(): - - # Terminals: - for t in Terminals.keys(): - First[t] = [t] - - First['$'] = ['$'] - First['#'] = ['#'] # what's this for? - - # Nonterminals: - - # Initialize to the empty set: - for n in Nonterminals.keys(): - First[n] = [] - - # Then propagate symbols until no change: - while 1: - some_change = 0 - for n in Nonterminals.keys(): - for p in Prodnames[n]: - for f in first(p.prod): - if f not in First[n]: - First[n].append( f ) - some_change = 1 - if not some_change: - break - - if 0 and yaccdebug: - _vf.write('\nFirst:\n') - for k in Nonterminals.keys(): - _vf.write("%-20s : %s\n" % - (k, " ".join([str(s) for s in First[k]]))) +# The following functions, classes, and variables are used to represent and +# manipulate the rules that make up a grammar. +# ----------------------------------------------------------------------------- + +# regex matching identifiers +_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$') # ----------------------------------------------------------------------------- -# === SLR Generation === +# class Production: +# +# This class stores the raw information about a single production or grammar rule. 
+# A grammar rule refers to a specification such as this: +# +# expr : expr PLUS term # -# The following functions are used to construct SLR (Simple LR) parsing tables -# as described on p.221-229 of the dragon book. +# Here are the basic attributes defined on all productions +# +# name - Name of the production. For example 'expr' +# prod - A list of symbols on the right side ['expr','PLUS','term'] +# prec - Production precedence level +# number - Production number. +# func - Function that executes on reduce +# file - File where production function is defined +# lineno - Line number where production function is defined +# +# The following attributes are defined or optional. +# +# len - Length of the production (number of symbols on right hand side) +# usyms - Set of unique symbols found in the production # ----------------------------------------------------------------------------- -# Global variables for the LR parsing engine -def lr_init_vars(): - global _lr_action, _lr_goto, _lr_method - global _lr_goto_cache - - _lr_action = { } # Action table - _lr_goto = { } # Goto table - _lr_method = "Unknown" # LR method used - _lr_goto_cache = { } - -# Compute the LR(0) closure operation on I, where I is a set of LR(0) items. -# prodlist is a list of productions. - -_add_count = 0 # Counter used to detect cycles - -def lr0_closure(I): - global _add_count - - _add_count += 1 - prodlist = Productions - - # Add everything in I to J - J = I[:] - didadd = 1 - while didadd: - didadd = 0 - for j in J: - for x in j.lrafter: - if x.lr0_added == _add_count: continue - # Add B --> .G to J - J.append(x.lr_next) - x.lr0_added = _add_count - didadd = 1 - - return J - -# Compute the LR(0) goto function goto(I,X) where I is a set -# of LR(0) items and X is a grammar symbol. This function is written -# in a way that guarantees uniqueness of the generated goto sets -# (i.e. the same goto set will never be returned as two different Python -# objects). With uniqueness, we can later do fast set comparisons using -# id(obj) instead of element-wise comparison. 
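# A small illustration (with made-up rule data) of the bookkeeping that
# Production.__init__ above precomputes: the printable form, the rule length,
# and the de-duplicated symbol list.

p = Production(1, 'expr', ['expr', 'PLUS', 'expr'])
assert p.str == 'expr -> expr PLUS expr'
assert p.len == 3
assert p.usyms == ['expr', 'PLUS']   # unique symbols only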
- -def lr0_goto(I,x): - # First we look for a previously cached entry - g = _lr_goto_cache.get((id(I),x),None) - if g: return g - - # Now we generate the goto set in a way that guarantees uniqueness - # of the result - - s = _lr_goto_cache.get(x,None) - if not s: - s = { } - _lr_goto_cache[x] = s - - gs = [ ] - for p in I: - n = p.lr_next - if n and n.lrbefore == x: - s1 = s.get(id(n),None) - if not s1: - s1 = { } - s[id(n)] = s1 - gs.append(n) - s = s1 - g = s.get('$',None) - if not g: - if gs: - g = lr0_closure(gs) - s['$'] = g +class Production(object): + reduced = 0 + def __init__(self, number, name, prod, precedence=('right', 0), func=None, file='', line=0): + self.name = name + self.prod = tuple(prod) + self.number = number + self.func = func + self.callable = None + self.file = file + self.line = line + self.prec = precedence + + # Internal settings used during table construction + + self.len = len(self.prod) # Length of the production + + # Create a list of unique production symbols used in the production + self.usyms = [] + for s in self.prod: + if s not in self.usyms: + self.usyms.append(s) + + # List of all LR items for the production + self.lr_items = [] + self.lr_next = None + + # Create a string representation + if self.prod: + self.str = '%s -> %s' % (self.name, ' '.join(self.prod)) else: - s['$'] = gs - _lr_goto_cache[(id(I),x)] = g - return g + self.str = '%s -> ' % self.name -# Added for LALR(1) + def __str__(self): + return self.str -# Given a setnumber of an lr0 state and a symbol return the setnumber of the goto state -def lr0_goto_setnumber(I_setnumber, x): - global Canonical - global GotoSetNum + def __repr__(self): + return 'Production(' + str(self) + ')' - if GotoSetNum.has_key((I_setnumber, x)): - setnumber = GotoSetNum[(I_setnumber, x)] - else: - gset = lr0_goto(Canonical[I_setnumber], x) - if not gset: - return -1 + def __len__(self): + return len(self.prod) + + def __nonzero__(self): + return 1 + + def __getitem__(self, index): + return self.prod[index] + + # Return the nth lr_item from the production (or None if at the end) + def lr_item(self, n): + if n > len(self.prod): + return None + p = LRItem(self, n) + # Precompute the list of productions immediately following. + try: + p.lr_after = self.Prodnames[p.prod[n+1]] + except (IndexError, KeyError): + p.lr_after = [] + try: + p.lr_before = p.prod[n-1] + except IndexError: + p.lr_before = None + return p + + # Bind the production function name to a callable + def bind(self, pdict): + if self.func: + self.callable = pdict[self.func] + +# This class serves as a minimal standin for Production objects when +# reading table data from files. It only contains information +# actually used by the LR parsing engine, plus some additional +# debugging information. +class MiniProduction(object): + def __init__(self, str, name, len, func, file, line): + self.name = name + self.len = len + self.func = func + self.callable = None + self.file = file + self.line = line + self.str = str + + def __str__(self): + return self.str + + def __repr__(self): + return 'MiniProduction(%s)' % self.str + + # Bind the production function name to a callable + def bind(self, pdict): + if self.func: + self.callable = pdict[self.func] + + +# ----------------------------------------------------------------------------- +# class LRItem +# +# This class represents a specific stage of parsing a production rule. For +# example: +# +# expr : expr . PLUS term +# +# In the above, the "." represents the current location of the parse. 
Here +# are the basic attributes: +# +# name - Name of the production. For example 'expr' +# prod - A list of symbols on the right side ['expr','.', 'PLUS','term'] +# number - Production number. +# +# lr_next - Next LR item. For example, if we are 'expr -> expr . PLUS term' +# then lr_next refers to 'expr -> expr PLUS . term' +# lr_index - LR item index (location of the ".") in the prod list. +# lookaheads - LALR lookahead symbols for this item +# len - Length of the production (number of symbols on right hand side) +# lr_after - List of all productions that immediately follow +# lr_before - Grammar symbol immediately before +# ----------------------------------------------------------------------------- + +class LRItem(object): + def __init__(self, p, n): + self.name = p.name + self.prod = list(p.prod) + self.number = p.number + self.lr_index = n + self.lookaheads = {} + self.prod.insert(n, '.') + self.prod = tuple(self.prod) + self.len = len(self.prod) + self.usyms = p.usyms + + def __str__(self): + if self.prod: + s = '%s -> %s' % (self.name, ' '.join(self.prod)) else: - gsetlen = len(gset) - for i in xrange(len(gset[0].setnumbers)): - inall = 1 - for item in gset: - if not item.setnumbers[i]: - inall = 0 - break - if inall and len(Canonical[i]) == gsetlen: - setnumber = i - break # Note: DB. I added this to improve performance. - # Not sure if this breaks the algorithm (it doesn't appear to). - - GotoSetNum[(I_setnumber, x)] = setnumber - - return setnumber - -# Compute the kernel of a set of LR(0) items -def lr0_kernel(I): - KI = [ ] - for p in I: - if p.name == "S'" or p.lr_index > 0 or p.len == 0: - KI.append(p) - - return KI - -_lr0_cidhash = { } - -# Compute the LR(0) sets of item function -def lr0_items(): - - C = [ lr0_closure([Productions[0].lr_next]) ] - i = 0 - for I in C: - _lr0_cidhash[id(I)] = i - i += 1 - - # Loop over the items in C and each grammar symbols - i = 0 - while i < len(C): - I = C[i] - i += 1 - - # Collect all of the symbols that could possibly be in the goto(I,X) sets - asyms = { } - for ii in I: - for s in ii.usyms: - asyms[s] = None - - for x in asyms.keys(): - g = lr0_goto(I,x) - if not g: continue - if _lr0_cidhash.has_key(id(g)): continue - _lr0_cidhash[id(g)] = len(C) - C.append(g) - - return C + s = '%s -> ' % self.name + return s + + def __repr__(self): + return 'LRItem(' + str(self) + ')' # ----------------------------------------------------------------------------- +# rightmost_terminal() +# +# Return the rightmost terminal from a list of symbols. Used in add_production() # ----------------------------------------------------------------------------- -def slr_parse_table(): - global _lr_method - goto = _lr_goto # Goto array - action = _lr_action # Action array - actionp = { } # Action production array (temporary) - - _lr_method = "SLR" - - n_srconflict = 0 - n_rrconflict = 0 - - if yaccdebug: - sys.stderr.write("yacc: Generating SLR parsing table...\n") - _vf.write("\n\nParsing method: SLR\n\n") - - # Step 1: Construct C = { I0, I1, ... 
IN}, collection of LR(0) items - # This determines the number of states - - C = lr0_items() - - # Build the parser table, state by state - st = 0 - for I in C: - # Loop over each production in I - actlist = [ ] # List of actions - - if yaccdebug: - _vf.write("\nstate %d\n\n" % st) - for p in I: - _vf.write(" (%d) %s\n" % (p.number, str(p))) - _vf.write("\n") +def rightmost_terminal(symbols, terminals): + i = len(symbols) - 1 + while i >= 0: + if symbols[i] in terminals: + return symbols[i] + i -= 1 + return None - for p in I: - try: - if p.prod[-1] == ".": - if p.name == "S'": - # Start symbol. Accept! - action[st,"$"] = 0 - actionp[st,"$"] = p # ----------------------------------------------------------------------------- +# === GRAMMAR CLASS === +# +# The following class represents the contents of the specified grammar along +# with various computed properties such as first sets, follow sets, LR items, etc. +# This data is used for critical parts of the table generation process later. # ----------------------------------------------------------------------------- + +class GrammarError(YaccError): + pass + +class Grammar(object): + def __init__(self, terminals): + self.Productions = [None] # A list of all of the productions. The first + # entry is always reserved for the purpose of + # building an augmented grammar + + self.Prodnames = {} # A dictionary mapping the names of nonterminals to a list of all + # productions of that nonterminal. + + self.Prodmap = {} # A dictionary that is only used to detect duplicate + # productions. + + self.Terminals = {} # A dictionary mapping the names of terminal symbols to a + # list of the rules where they are used. + + for term in terminals: + self.Terminals[term] = [] + + self.Terminals['error'] = [] + + self.Nonterminals = {} # A dictionary mapping names of nonterminals to a list + # of rule numbers where they are used. + + self.First = {} # A dictionary of precomputed FIRST(x) symbols + + self.Follow = {} # A dictionary of precomputed FOLLOW(x) symbols + + self.Precedence = {} # Precedence rules for each terminal. Contains tuples of the + # form ('right',level) or ('nonassoc', level) or ('left',level) + + self.UsedPrecedence = set() # Precedence rules that were actually used by the grammar. + # This is only used to provide error checking and to generate + # a warning about unused precedence rules. + + self.Start = None # Starting symbol for the grammar + + + def __len__(self): + return len(self.Productions) + + def __getitem__(self, index): + return self.Productions[index] + + # ----------------------------------------------------------------------------- + # set_precedence() + # + # Sets the precedence for a given terminal. assoc is the associativity such as + # 'left','right', or 'nonassoc'. level is a numeric level. + # + # ----------------------------------------------------------------------------- + + def set_precedence(self, term, assoc, level): + assert self.Productions == [None], 'Must call set_precedence() before add_production()' + if term in self.Precedence: + raise GrammarError('Precedence already specified for terminal %r' % term) + if assoc not in ['left', 'right', 'nonassoc']: + raise GrammarError("Associativity must be one of 'left','right', or 'nonassoc'") + self.Precedence[term] = (assoc, level) + 
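# How the precedence machinery above fits together: set_precedence() records
# an (assoc, level) pair per terminal, and when a rule carries no %prec
# marker, add_production() below falls back to the precedence of the rule's
# rightmost terminal via rightmost_terminal(). Toy symbols for illustration,
# assuming rightmost_terminal() as defined above is in scope:

terminals = {'PLUS': [], 'TIMES': []}
assert rightmost_terminal(['expr', 'PLUS', 'term'], terminals) == 'PLUS'
assert rightmost_terminal(['term'], terminals) is None   # no terminal in the rule

+ # ----------------------------------------------------------------------------- + # add_production() + # + # Given an action function, this function assembles a production rule and + # computes its precedence level. 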
+ # + # The production rule is supplied as a list of symbols. For example, + # a rule such as 'expr : expr PLUS term' has a production name of 'expr' and + # symbols ['expr','PLUS','term']. + # + # Precedence is determined by the precedence of the right-most non-terminal + # or the precedence of a terminal specified by %prec. + # + # A variety of error checks are performed to make sure production symbols + # are valid and that %prec is used correctly. + # ----------------------------------------------------------------------------- + + def add_production(self, prodname, syms, func=None, file='', line=0): + + if prodname in self.Terminals: + raise GrammarError('%s:%d: Illegal rule name %r. Already defined as a token' % (file, line, prodname)) + if prodname == 'error': + raise GrammarError('%s:%d: Illegal rule name %r. error is a reserved word' % (file, line, prodname)) + if not _is_identifier.match(prodname): + raise GrammarError('%s:%d: Illegal rule name %r' % (file, line, prodname)) + + # Look for literal tokens + for n, s in enumerate(syms): + if s[0] in "'\"": + try: + c = eval(s) + if (len(c) > 1): + raise GrammarError('%s:%d: Literal token %s in rule %r may only be a single character' % + (file, line, s, prodname)) + if c not in self.Terminals: + self.Terminals[c] = [] + syms[n] = c + continue + except SyntaxError: + pass + if not _is_identifier.match(s) and s != '%prec': + raise GrammarError('%s:%d: Illegal name %r in rule %r' % (file, line, s, prodname)) + + # Determine the precedence level + if '%prec' in syms: + if syms[-1] == '%prec': + raise GrammarError('%s:%d: Syntax error. Nothing follows %%prec' % (file, line)) + if syms[-2] != '%prec': + raise GrammarError('%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule' % + (file, line)) + precname = syms[-1] + prodprec = self.Precedence.get(precname) + if not prodprec: + raise GrammarError('%s:%d: Nothing known about the precedence of %r' % (file, line, precname)) + else: + self.UsedPrecedence.add(precname) + del syms[-2:] # Drop %prec from the rule + else: + # If no %prec, precedence is determined by the rightmost terminal symbol + precname = rightmost_terminal(syms, self.Terminals) + prodprec = self.Precedence.get(precname, ('right', 0)) + + # See if the rule is already in the rulemap + map = '%s -> %s' % (prodname, syms) + if map in self.Prodmap: + m = self.Prodmap[map] + raise GrammarError('%s:%d: Duplicate rule %s. ' % (file, line, m) + + 'Previous definition at %s:%d' % (m.file, m.line)) + + # From this point on, everything is valid. Create a new Production instance + pnumber = len(self.Productions) + if prodname not in self.Nonterminals: + self.Nonterminals[prodname] = [] + + # Add the production number to Terminals and Nonterminals + for t in syms: + if t in self.Terminals: + self.Terminals[t].append(pnumber) + else: + if t not in self.Nonterminals: + self.Nonterminals[t] = [] + self.Nonterminals[t].append(pnumber) + + # Create a production and add it to the list of productions + p = Production(pnumber, prodname, syms, prodprec, func, file, line) + self.Productions.append(p) + self.Prodmap[map] = p + + # Add to the global productions list + try: + self.Prodnames[prodname].append(p) + except KeyError: + self.Prodnames[prodname] = [p] + + # ----------------------------------------------------------------------------- + # set_start() + # + # Sets the starting symbol and creates the augmented grammar. Production + # rule 0 is S' -> start where start is the start symbol. 
+ # ----------------------------------------------------------------------------- + + def set_start(self, start=None): + if not start: + start = self.Productions[1].name + if start not in self.Nonterminals: + raise GrammarError('start symbol %s undefined' % start) + self.Productions[0] = Production(0, "S'", [start]) + self.Nonterminals[start].append(0) + self.Start = start + + # ----------------------------------------------------------------------------- + # find_unreachable() + # + # Find all of the nonterminal symbols that can't be reached from the starting + # symbol. Returns a list of nonterminals that can't be reached. + # ----------------------------------------------------------------------------- + + def find_unreachable(self): + + # Mark all symbols that are reachable from a symbol s + def mark_reachable_from(s): + if s in reachable: + return + reachable.add(s) + for p in self.Prodnames.get(s, []): + for r in p.prod: + mark_reachable_from(r) + + reachable = set() + mark_reachable_from(self.Productions[0].prod[0]) + return [s for s in self.Nonterminals if s not in reachable] + + # ----------------------------------------------------------------------------- + # infinite_cycles() + # + # This function looks at the various parsing rules and tries to detect + # infinite recursion cycles (grammar rules where there is no possible way + # to derive a string of only terminals). + # ----------------------------------------------------------------------------- + + def infinite_cycles(self): + terminates = {} + + # Terminals: + for t in self.Terminals: + terminates[t] = True + + terminates['$end'] = True + + # Nonterminals: + + # Initialize to false: + for n in self.Nonterminals: + terminates[n] = False + + # Then propagate termination until no change: + while True: + some_change = False + for (n, pl) in self.Prodnames.items(): + # Nonterminal n terminates iff any of its productions terminates. + for p in pl: + # Production p terminates iff all of its rhs symbols terminate. + for s in p.prod: + if not terminates[s]: + # The symbol s does not terminate, + # so production p does not terminate. + p_terminates = False + break else: - # We are at the end of a production. Reduce! - for a in Follow[p.name]: - actlist.append((a,p,"reduce using rule %d (%s)" % (p.number,p))) - r = action.get((st,a),None) - if r is not None: - # Whoa. Have a shift/reduce or reduce/reduce conflict - if r > 0: - # Need to decide on shift or reduce here - # By default we favor shifting. Need to add - # some precedence rules here. - sprec,slevel = Productions[actionp[st,a].number].prec - rprec,rlevel = Precedence.get(a,('right',0)) - if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')): - # We really need to reduce here. - action[st,a] = -p.number - actionp[st,a] = p - if not slevel and not rlevel: - _vfc.write("shift/reduce conflict in state %d resolved as reduce.\n" % st) - _vf.write(" ! shift/reduce conflict for %s resolved as reduce.\n" % a) - n_srconflict += 1 - elif (slevel == rlevel) and (rprec == 'nonassoc'): - action[st,a] = None - else: - # Hmmm. Guess we'll keep the shift - if not slevel and not rlevel: - _vfc.write("shift/reduce conflict in state %d resolved as shift.\n" % st) - _vf.write(" ! shift/reduce conflict for %s resolved as shift.\n" % a) - n_srconflict +=1 - elif r < 0: - # Reduce/reduce conflict. 
In this case, we favor the rule - # that was defined first in the grammar file - oldp = Productions[-r] - pp = Productions[p.number] - if oldp.line > pp.line: - action[st,a] = -p.number - actionp[st,a] = p - # sys.stderr.write("Reduce/reduce conflict in state %d\n" % st) - n_rrconflict += 1 - _vfc.write("reduce/reduce conflict in state %d resolved using rule %d (%s).\n" % (st, actionp[st,a].number, actionp[st,a])) - _vf.write(" ! reduce/reduce conflict for %s resolved using rule %d (%s).\n" % (a,actionp[st,a].number, actionp[st,a])) - else: - sys.stderr.write("Unknown conflict in state %d\n" % st) - else: - action[st,a] = -p.number - actionp[st,a] = p + # didn't break from the loop, + # so every symbol s terminates + # so production p terminates. + p_terminates = True + + if p_terminates: + # symbol n terminates! + if not terminates[n]: + terminates[n] = True + some_change = True + # Don't need to consider any more productions for this n. + break + + if not some_change: + break + + infinite = [] + for (s, term) in terminates.items(): + if not term: + if s not in self.Prodnames and s not in self.Terminals and s != 'error': + # s is used-but-not-defined, and we've already warned of that, + # so it would be overkill to say that it's also non-terminating. + pass else: - i = p.lr_index - a = p.prod[i+1] # Get symbol right after the "." - if Terminals.has_key(a): - g = lr0_goto(I,a) - j = _lr0_cidhash.get(id(g),-1) - if j >= 0: - # We are in a shift state - actlist.append((a,p,"shift and go to state %d" % j)) - r = action.get((st,a),None) - if r is not None: - # Whoa have a shift/reduce or shift/shift conflict - if r > 0: - if r != j: - sys.stderr.write("Shift/shift conflict in state %d\n" % st) - elif r < 0: - # Do a precedence check. - # - if precedence of reduce rule is higher, we reduce. - # - if precedence of reduce is same and left assoc, we reduce. - # - otherwise we shift - rprec,rlevel = Productions[actionp[st,a].number].prec - sprec,slevel = Precedence.get(a,('right',0)) - if (slevel > rlevel) or ((slevel == rlevel) and (rprec != 'left')): - # We decide to shift here... highest precedence to shift - action[st,a] = j - actionp[st,a] = p - if not slevel and not rlevel: - n_srconflict += 1 - _vfc.write("shift/reduce conflict in state %d resolved as shift.\n" % st) - _vf.write(" ! shift/reduce conflict for %s resolved as shift.\n" % a) - elif (slevel == rlevel) and (rprec == 'nonassoc'): - action[st,a] = None - else: - # Hmmm. Guess we'll keep the reduce - if not slevel and not rlevel: - n_srconflict +=1 - _vfc.write("shift/reduce conflict in state %d resolved as reduce.\n" % st) - _vf.write(" ! shift/reduce conflict for %s resolved as reduce.\n" % a) - - else: - sys.stderr.write("Unknown conflict in state %d\n" % st) - else: - action[st,a] = j - actionp[st,a] = p - - except StandardError,e: - raise YaccError, "Hosed in slr_parse_table", e - - # Print the actions associated with each terminal - if yaccdebug: - _actprint = { } - for a,p,m in actlist: - if action.has_key((st,a)): - if p is actionp[st,a]: - _vf.write(" %-15s %s\n" % (a,m)) - _actprint[(a,m)] = 1 - _vf.write("\n") - for a,p,m in actlist: - if action.has_key((st,a)): - if p is not actionp[st,a]: - if not _actprint.has_key((a,m)): - _vf.write(" ! 
%-15s [ %s ]\n" % (a,m)) - _actprint[(a,m)] = 1 - - # Construct the goto table for this state - if yaccdebug: - _vf.write("\n") - nkeys = { } - for ii in I: - for s in ii.usyms: - if Nonterminals.has_key(s): - nkeys[s] = None - for n in nkeys.keys(): - g = lr0_goto(I,n) - j = _lr0_cidhash.get(id(g),-1) - if j >= 0: - goto[st,n] = j - if yaccdebug: - _vf.write(" %-30s shift and go to state %d\n" % (n,j)) - - st += 1 - - if yaccdebug: - if n_srconflict == 1: - sys.stderr.write("yacc: %d shift/reduce conflict\n" % n_srconflict) - if n_srconflict > 1: - sys.stderr.write("yacc: %d shift/reduce conflicts\n" % n_srconflict) - if n_rrconflict == 1: - sys.stderr.write("yacc: %d reduce/reduce conflict\n" % n_rrconflict) - if n_rrconflict > 1: - sys.stderr.write("yacc: %d reduce/reduce conflicts\n" % n_rrconflict) + infinite.append(s) + + return infinite + + # ----------------------------------------------------------------------------- + # undefined_symbols() + # + # Find all symbols that were used in the grammar, but not defined as tokens or + # grammar rules. Returns a list of tuples (sym, prod) where sym is the symbol + # and prod is the production where the symbol was used. + # ----------------------------------------------------------------------------- + def undefined_symbols(self): + result = [] + for p in self.Productions: + if not p: + continue + for s in p.prod: + if s not in self.Prodnames and s not in self.Terminals and s != 'error': + result.append((s, p)) + return result + + # ----------------------------------------------------------------------------- + # unused_terminals() + # + # Find all terminals that were defined, but not used by the grammar. Returns + # a list of all symbols. + # ----------------------------------------------------------------------------- + def unused_terminals(self): + unused_tok = [] + for s, v in self.Terminals.items(): + if s != 'error' and not v: + unused_tok.append(s) + + return unused_tok + + # ------------------------------------------------------------------------------ + # unused_rules() + # + # Find all grammar rules that were defined, but not used (maybe not reachable). + # Returns a list of productions. + # ------------------------------------------------------------------------------ + + def unused_rules(self): + unused_prod = [] + for s, v in self.Nonterminals.items(): + if not v: + p = self.Prodnames[s][0] + unused_prod.append(p) + return unused_prod + + # ----------------------------------------------------------------------------- + # unused_precedence() + # + # Returns a list of tuples (term,precedence) corresponding to precedence + # rules that were never used by the grammar. term is the name of the terminal + # on which precedence was applied and precedence is a string such as 'left' or + # 'right' corresponding to the type of precedence. + # ----------------------------------------------------------------------------- + + def unused_precedence(self): + unused = [] + for termname in self.Precedence: + if not (termname in self.Terminals or termname in self.UsedPrecedence): + unused.append((termname, self.Precedence[termname][0])) + + return unused + + # ------------------------------------------------------------------------- + # _first() + # + # Compute the value of FIRST1(beta) where beta is a tuple of symbols. + # + # During execution of compute_first(), the result may be incomplete. + # Afterward (e.g., when called from compute_follow()), it will be complete.
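The FIRST computation documented here is a classic fixed point: seed each terminal's set with itself, then repeatedly fold each production's right-hand side into its name's set until nothing changes. A standalone sketch of the same loop, not part of the patch, over a made-up toy grammar (with '' standing for the empty string, as in the code below):

# Fixed-point FIRST computation mirroring compute_first()/_first().
prods = {
    'E':  [['T', 'Eq']],
    'Eq': [['PLUS', 'T', 'Eq'], []],   # [] is an empty production
    'T':  [['NUMBER']],
}
terminals = {'PLUS', 'NUMBER'}
first = {t: {t} for t in terminals}
first.update({n: set() for n in prods})

changed = True
while changed:
    changed = False
    for n, alts in prods.items():
        for alt in alts:
            acc = set()
            for sym in alt:
                acc |= first[sym] - {''}
                if '' not in first[sym]:
                    break
            else:
                acc.add('')            # every symbol on the rhs can derive empty
            if not acc <= first[n]:
                first[n] |= acc
                changed = True

# Result: first['E'] == {'NUMBER'}, first['Eq'] == {'PLUS', ''}, first['T'] == {'NUMBER'}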
+ # ------------------------------------------------------------------------- + def _first(self, beta): + + # We are computing First(x1,x2,x3,...,xn) + result = [] + for x in beta: + x_produces_empty = False + + # Add all the non-empty symbols of First[x] to the result. + for f in self.First[x]: + if f == '': + x_produces_empty = True + else: + if f not in result: + result.append(f) + if x_produces_empty: + # We have to consider the next x in beta, + # i.e. stay in the loop. + pass + else: + # We don't have to consider any further symbols in beta. + break + else: + # There was no 'break' from the loop, + # so x_produces_empty was true for all x in beta, + # so beta produces empty as well. + result.append('') + + return result + + # ------------------------------------------------------------------------- + # compute_first() + # + # Compute the value of FIRST1(X) for all symbols + # ------------------------------------------------------------------------- + def compute_first(self): + if self.First: + return self.First + + # Terminals: + for t in self.Terminals: + self.First[t] = [t] + + self.First['$end'] = ['$end'] + + # Nonterminals: + + # Initialize to the empty set: + for n in self.Nonterminals: + self.First[n] = [] + + # Then propagate symbols until no change: + while True: + some_change = False + for n in self.Nonterminals: + for p in self.Prodnames[n]: + for f in self._first(p.prod): + if f not in self.First[n]: + self.First[n].append(f) + some_change = True + if not some_change: + break + + return self.First + + # --------------------------------------------------------------------- + # compute_follow() + # + # Computes all of the follow sets for every non-terminal symbol. The + # follow set is the set of all symbols that might follow a given + # non-terminal. See the Dragon book, 2nd Ed. p. 189. + # --------------------------------------------------------------------- + def compute_follow(self, start=None): + # If already computed, return the result + if self.Follow: + return self.Follow + + # If first sets not computed yet, do that first. + if not self.First: + self.compute_first() + + # Add '$end' to the follow list of the start symbol + for k in self.Nonterminals: + self.Follow[k] = [] + + if not start: + start = self.Productions[1].name + + self.Follow[start] = ['$end'] + + while True: + didadd = False + for p in self.Productions[1:]: + # Here is the production set + for i, B in enumerate(p.prod): + if B in self.Nonterminals: + # Okay. We got a non-terminal in a production + fst = self._first(p.prod[i+1:]) + hasempty = False + for f in fst: + if f != '' and f not in self.Follow[B]: + self.Follow[B].append(f) + didadd = True + if f == '': + hasempty = True + if hasempty or i == (len(p.prod)-1): + # Add elements of Follow(p.name) to Follow(B) + for f in self.Follow[p.name]: + if f not in self.Follow[B]: + self.Follow[B].append(f) + didadd = True + if not didadd: + break + return self.Follow + + + # ----------------------------------------------------------------------------- + # build_lritems() + # + # This function walks the list of productions and builds a complete set of the + # LR items. The LR items are stored in two ways: First, they are uniquely + # numbered and placed in the list _lritems. Second, a linked list of LR items + # is built for each production. For example: + # + # E -> E PLUS E + # + # Creates the list + # + # [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E .
] + # ----------------------------------------------------------------------------- + + def build_lritems(self): + for p in self.Productions: + lastlri = p + i = 0 + lr_items = [] + while True: + if i > len(p): + lri = None + else: + lri = LRItem(p, i) + # Precompute the list of productions immediately following + try: + lri.lr_after = self.Prodnames[lri.prod[i+1]] + except (IndexError, KeyError): + lri.lr_after = [] + try: + lri.lr_before = lri.prod[i-1] + except IndexError: + lri.lr_before = None + + lastlri.lr_next = lri + if not lri: + break + lr_items.append(lri) + lastlri = lri + i += 1 + p.lr_items = lr_items # ----------------------------------------------------------------------------- -# ==== LALR(1) Parsing ==== -# FINISHED! 5/20/2003 by Elias Ioup +# == Class LRTable == +# +# This basic class represents a basic table of LR parsing information. +# Methods for generating the tables are not defined here. They are defined +# in the derived class LRGeneratedTable. # ----------------------------------------------------------------------------- +class VersionError(YaccError): + pass -# Compute the lr1_closure of a set I. I is a list of productions and setnumber -# is the state that you want the lr items that are made from the to come from. - -_lr1_add_count = 0 - -def lr1_closure(I, setnumber = 0): - global _add_count - global Nonterminals - - _add_count += 1 - prodlist = Productions - - # Add everything in I to J - J = I[:] - Jhash = { } - for j in J: - Jhash[id(j)] = 1 - - didadd = 1 - while didadd: - didadd = 0 - for j in J: - jprod = j.prod - jlr_index = j.lr_index - jprodslice = jprod[jlr_index+2:] - - if jlr_index < len(jprod) - 1 and Nonterminals.has_key(jprod[jlr_index+1]): - first_syms = [] - - if j.lk_added.setdefault(setnumber, 0) < len(j.lookaheads[setnumber]): - for a in j.lookaheads[setnumber][j.lk_added[setnumber]:]: - # find b in FIRST(Xa) if j = [A->a.BX,a] - temp_first_syms = first(jprodslice + (a,)) - for x in temp_first_syms: - if x not in first_syms: - first_syms.append(x) - - j.lk_added[setnumber] = len(j.lookaheads[setnumber]) - - for x in j.lrafter: - - # Add B --> .G to J - if x.lr_next.lookaheads.has_key(setnumber): - _xlook = x.lr_next.lookaheads[setnumber] - for s in first_syms: - if s not in _xlook: - _xlook.append(s) - didadd = 1 - else: - x.lr_next.lookaheads[setnumber] = first_syms - didadd = 1 - - nid = id(x.lr_next) - if not Jhash.has_key(nid): - J.append(x.lr_next) - Jhash[nid] = 1 - - return J - -def add_lookaheads(K): - spontaneous = [] - propogate = [] - - for setnumber in range(len(K)): - for kitem in K[setnumber]: - kitem.lookaheads[setnumber] = ['#'] - J = lr1_closure([kitem], setnumber) - - # find the lookaheads that are spontaneously created from closures - # and the propogations of lookaheads between lr items - for item in J: - if item.lr_index < len(item.prod)-1: - for lookahead in item.lookaheads[setnumber]: - goto_setnumber = lr0_goto_setnumber(setnumber, item.prod[item.lr_index+1]) - next = None - if lookahead != '#': - if item.lr_next in K[goto_setnumber]: - next = item.lr_next - if next: - spontaneous.append((next, (lookahead, goto_setnumber))) - else: - if goto_setnumber > -1: - if item.lr_next in K[goto_setnumber]: - next = item.lr_next - - if next: - propogate.append(((kitem, setnumber), (next, goto_setnumber))) +class LRTable(object): + def __init__(self): + self.lr_action = None + self.lr_goto = None + self.lr_productions = None + self.lr_method = None - + def read_table(self, module): + if isinstance(module, types.ModuleType): + 
parsetab = module + else: + exec('import %s' % module) + parsetab = sys.modules[module] - for x in K[setnumber]: - x.lookaheads[setnumber] = [] + if parsetab._tabversion != __tabversion__: + raise VersionError('yacc table file version is out of date') - for x in spontaneous: - if x[1][0] not in x[0].lookaheads[x[1][1]]: - x[0].lookaheads[x[1][1]].append(x[1][0]) + self.lr_action = parsetab._lr_action + self.lr_goto = parsetab._lr_goto - K[0][0].lookaheads[0] = ['$'] + self.lr_productions = [] + for p in parsetab._lr_productions: + self.lr_productions.append(MiniProduction(*p)) + + self.lr_method = parsetab._lr_method + return parsetab._lr_signature + + def read_pickle(self, filename): + try: + import cPickle as pickle + except ImportError: + import pickle + + if not os.path.exists(filename): + raise ImportError + + in_f = open(filename, 'rb') + + tabversion = pickle.load(in_f) + if tabversion != __tabversion__: + raise VersionError('yacc table file version is out of date') + self.lr_method = pickle.load(in_f) + signature = pickle.load(in_f) + self.lr_action = pickle.load(in_f) + self.lr_goto = pickle.load(in_f) + productions = pickle.load(in_f) + + self.lr_productions = [] + for p in productions: + self.lr_productions.append(MiniProduction(*p)) + + in_f.close() + return signature + + # Bind all production function names to callable objects in pdict + def bind_callables(self, pdict): + for p in self.lr_productions: + p.bind(pdict) - pitems = {} - for x in propogate: - if pitems.has_key(x[0]): - pitems[x[0]].append(x[1]) - else: - pitems[x[0]] = [] - pitems[x[0]].append(x[1]) - - # propogate the lookaheads that were spontaneously generated - # based on the propogations produced above - stop = 0 - - while not stop: - stop = 1 - kindex = 0 - for set in K: - for item in set: - pkey = (item, kindex) - if pitems.has_key(pkey): - for propogation in pitems[pkey]: - gitem = propogation[0] - gsetnumber = propogation[1] - glookaheads = gitem.lookaheads[gsetnumber] - for lookahead in item.lookaheads[kindex]: - if lookahead not in glookaheads: - glookaheads.append(lookahead) - stop = 0 - kindex += 1 - -def ReduceNonterminals(): - global Nonterminals - - global TReductions - global NTReductions - - for nt in Nonterminals.keys(): - TReductions[nt] = [] - NTReductions[nt] = [] - - for nt in Nonterminals.keys(): - terms = ReduceToTerminals(nt) - TReductions[nt].extend(terms) - if not NTReductions.has_key(nt): - ReduceToNonterminals(nt) - - - -def ReduceToTerminals(nt): - global Prodnames - global Terminals - reducedterminals = [] - - for p in Prodnames[nt]: - if len(p.prod) > 0: - if Terminals.has_key(p.prod[0]): - if p.prod[0] not in reducedterminals: - reducedterminals.append(p.prod[0]) - else: - if p.prod[0] != nt: - terms = ReduceToTerminals(p.prod[0]) - for t in terms: - if t not in reducedterminals: - reducedterminals.append(t) - - return reducedterminals - - -def ReduceToNonterminals(nt): - global Prodnames - global Nonterminals - global NTReductions - reducednonterminals = [] - - for p in Prodnames[nt]: - if len(p.prod) > 0: - if Nonterminals.has_key(p.prod[0]): - if p.prod[0] not in reducednonterminals: - reducednonterminals.append(p.prod[0]) - if p.prod[0] != nt: - if not NTReductions.has_key(p.prod[0]): - ReduceToNonterminals(p.prod[0]) - - nterms = NTReductions[p.prod[0]] - for nt in nterms: - if nt not in reducednonterminals: - reducednonterminals.append(nt) - - - NTReductions[nt] = reducednonterminals # ----------------------------------------------------------------------------- -# 
lalr_parse_table() +# === LR Generator === # -# This function constructs an LALR table. +# The following classes and functions are used to generate LR parsing tables on +# a grammar. +# ----------------------------------------------------------------------------- + +# ----------------------------------------------------------------------------- +# digraph() +# traverse() +# +# The following two functions are used to compute set valued functions +# of the form: +# +# F(x) = F'(x) U U{F(y) | x R y} +# +# This is used to compute the values of Read() sets as well as FOLLOW sets +# in LALR(1) generation. +# +# Inputs: X - An input set +# R - A relation +# FP - Set-valued function +# ------------------------------------------------------------------------------ + +def digraph(X, R, FP): + N = {} + for x in X: + N[x] = 0 + stack = [] + F = {} + for x in X: + if N[x] == 0: + traverse(x, N, stack, F, X, R, FP) + return F + +def traverse(x, N, stack, F, X, R, FP): + stack.append(x) + d = len(stack) + N[x] = d + F[x] = FP(x) # F(X) <- F'(x) + + rel = R(x) # Get y's related to x + for y in rel: + if N[y] == 0: + traverse(y, N, stack, F, X, R, FP) + N[x] = min(N[x], N[y]) + for a in F.get(y, []): + if a not in F[x]: + F[x].append(a) + if N[x] == d: + N[stack[-1]] = MAXINT + F[stack[-1]] = F[x] + element = stack.pop() + while element != x: + N[stack[-1]] = MAXINT + F[stack[-1]] = F[x] + element = stack.pop() + +class LALRError(YaccError): + pass + +# ----------------------------------------------------------------------------- +# == LRGeneratedTable == +# +# This class implements the LR table generation algorithm. There are no +# public methods except for write() # ----------------------------------------------------------------------------- -def lalr_parse_table(): - global _lr_method - goto = _lr_goto # Goto array - action = _lr_action # Action array - actionp = { } # Action production array (temporary) - goto_cache = _lr_goto_cache - cid_hash = _lr0_cidhash - - _lr_method = "LALR" - - n_srconflict = 0 - n_rrconflict = 0 - - if yaccdebug: - sys.stderr.write("yacc: Generating LALR(1) parsing table...\n") - _vf.write("\n\nParsing method: LALR(1)\n\n") - - # Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items - # This determines the number of states - - C = lr0_items() - - global Canonical - Canonical = C - - ### - # Create the kernel states. 
- ### - K = [] - setC = [0]*len(C) - for x in C: - K.append(lr0_kernel(x)) - for y in x: - y.setnumbers = setC[:] - - _cindex = 0 - for x in C: - for y in x: - y.lookaheads[_cindex] = [] - y.setnumbers[_cindex] = 1 - _cindex = _cindex + 1 - - ### - # Add lookaheads to the lr items - ### - - add_lookaheads(K) - - ### - # Do the reductions for parsing first and keep them in globals - ### - - ReduceNonterminals() - - global TReductions - global NTReductions - global Prodempty - - EmptyAncestors = {} - for y in Prodempty.keys(): - EmptyAncestors[y] = [] - for x in NTReductions.items(): - for y in x[1]: - if Prodempty.has_key(y): - EmptyAncestors[y].append(x[0]) - - - # Build the parser table, state by state - st = 0 - for I in C: - # Loop over each production in I - actlist = [ ] # List of actions - acthash = { } - - idI = id(I) - - if yaccdebug: - _vf.write("\nstate %d\n\n" % st) - for p in I: - _vf.write(" (%d) %s\n" % (p.number, str(p))) - _vf.write("\n") - global First +class LRGeneratedTable(LRTable): + def __init__(self, grammar, method='LALR', log=None): + if method not in ['SLR', 'LALR']: + raise LALRError('Unsupported method %s' % method) + + self.grammar = grammar + self.lr_method = method + + # Set up the logger + if not log: + log = NullLogger() + self.log = log + + # Internal attributes + self.lr_action = {} # Action table + self.lr_goto = {} # Goto table + self.lr_productions = grammar.Productions # Copy of grammar Production array + self.lr_goto_cache = {} # Cache of computed gotos + self.lr0_cidhash = {} # Cache of closures + + self._add_count = 0 # Internal counter used to detect cycles + + # Diagonistic information filled in by the table generator + self.sr_conflict = 0 + self.rr_conflict = 0 + self.conflicts = [] # List of conflicts + + self.sr_conflicts = [] + self.rr_conflicts = [] + + # Build the tables + self.grammar.build_lritems() + self.grammar.compute_first() + self.grammar.compute_follow() + self.lr_parse_table() + + # Compute the LR(0) closure operation on I, where I is a set of LR(0) items. + + def lr0_closure(self, I): + self._add_count += 1 + + # Add everything in I to J + J = I[:] + didadd = True + while didadd: + didadd = False + for j in J: + for x in j.lr_after: + if getattr(x, 'lr0_added', 0) == self._add_count: + continue + # Add B --> .G to J + J.append(x.lr_next) + x.lr0_added = self._add_count + didadd = True + + return J + + # Compute the LR(0) goto function goto(I,X) where I is a set + # of LR(0) items and X is a grammar symbol. This function is written + # in a way that guarantees uniqueness of the generated goto sets + # (i.e. the same goto set will never be returned as two different Python + # objects). With uniqueness, we can later do fast set comparisons using + # id(obj) instead of element-wise comparison. + + def lr0_goto(self, I, x): + # First we look for a previously cached entry + g = self.lr_goto_cache.get((id(I), x)) + if g: + return g + + # Now we generate the goto set in a way that guarantees uniqueness + # of the result + + s = self.lr_goto_cache.get(x) + if not s: + s = {} + self.lr_goto_cache[x] = s + + gs = [] for p in I: - try: - if p.prod[-1] == ".": - if p.name == "S'": - # Start symbol. Accept! 
- action[st,"$"] = 0 - actionp[st,"$"] = p - elif len(p.prod) == 0: - ancestors = EmptyAncestors[p.name] - for i in ancestors: - for s in K: - if i in s: - input_list = [] - plist = Productions[i.name] - for x in plist: - if len(x.prod) > 0 and x.prod[0] == p.name: - n = p.prod[1:] - d = x.prod[lr_index+2:] - for l in x.lookaheads.items(): - flist = First[tuple(n+d+[l])] - for f in flist: - if f not in input_list and f in p.lookaheads[st]: - input_list.append(f) - - # We are at the end of a production. Reduce! - #print "input_list: %s" % input_list - #print "Follow[p.name]: %s" % Follow[p.name] - for a in input_list: - actlist.append((a,p,"reduce using rule %d (%s) " % (p.number,p))) - r = action.get((st,a),None) - if r is not None: - # Whoa. Have a shift/reduce or reduce/reduce conflict - if r > 0: - # Need to decide on shift or reduce here - # By default we favor shifting. Need to add - # some precedence rules here. - sprec,slevel = Productions[actionp[st,a].number].prec - rprec,rlevel = Precedence.get(a,('right',0)) - if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')): - # We really need to reduce here. - action[st,a] = -p.number - actionp[st,a] = p - if not slevel and not rlevel: - _vfc.write("shift/reduce conflict in state %d resolved as reduce.\n" % st) - _vf.write(" ! shift/reduce conflict for %s resolved as reduce.\n" % a) - n_srconflict += 1 - elif (slevel == rlevel) and (rprec == 'nonassoc'): - action[st,a] = None - else: - # Hmmm. Guess we'll keep the shift - if not slevel and not rlevel: - _vfc.write("shift/reduce conflict in state %d resolved as shift.\n" % st) - _vf.write(" ! shift/reduce conflict for %s resolved as shift.\n" % a) - n_srconflict +=1 - elif r < 0: - # Reduce/reduce conflict. In this case, we favor the rule - # that was defined first in the grammar file - oldp = Productions[-r] - pp = Productions[p.number] - if oldp.line > pp.line: - action[st,a] = -p.number - actionp[st,a] = p - # print "Reduce/reduce conflict in state %d" % st - n_rrconflict += 1 - _vfc.write("reduce/reduce conflict in state %d resolved using rule %d.\n" % (st, actionp[st,a].number)) - _vf.write(" ! 
reduce/reduce conflict for %s resolved using rule %d.\n" % (a,actionp[st,a].number)) - else: - sys.stderr.write("Unknown conflict in state %d\n" % st) - else: - action[st,a] = -p.number - actionp[st,a] = p + n = p.lr_next + if n and n.lr_before == x: + s1 = s.get(id(n)) + if not s1: + s1 = {} + s[id(n)] = s1 + gs.append(n) + s = s1 + g = s.get('$end') + if not g: + if gs: + g = self.lr0_closure(gs) + s['$end'] = g + else: + s['$end'] = gs + self.lr_goto_cache[(id(I), x)] = g + return g + + # Compute the LR(0) sets of item function + def lr0_items(self): + C = [self.lr0_closure([self.grammar.Productions[0].lr_next])] + i = 0 + for I in C: + self.lr0_cidhash[id(I)] = i + i += 1 + + # Loop over the items in C and each grammar symbol + i = 0 + while i < len(C): + I = C[i] + i += 1 + + # Collect all of the symbols that could possibly be in the goto(I,X) sets + asyms = {} + for ii in I: + for s in ii.usyms: + asyms[s] = None + + for x in asyms: + g = self.lr0_goto(I, x) + if not g or id(g) in self.lr0_cidhash: + continue + self.lr0_cidhash[id(g)] = len(C) + C.append(g) + + return C + + # ----------------------------------------------------------------------------- + # ==== LALR(1) Parsing ==== + # + # LALR(1) parsing is almost exactly the same as SLR except that instead of + # relying upon Follow() sets when performing reductions, a more selective + # lookahead set that incorporates the state of the LR(0) machine is utilized. + # Thus, we mainly just have to focus on calculating the lookahead sets. + # + # The method used here is due to DeRemer and Pennello (1982). + # + # DeRemer, F. L., and T. J. Pennello: "Efficient Computation of LALR(1) + # Lookahead Sets", ACM Transactions on Programming Languages and Systems, + # Vol. 4, No. 4, Oct. 1982, pp. 615-649 + # + # Further details can also be found in: + # + # J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing", + # McGraw-Hill Book Company, (1985). + # + # ----------------------------------------------------------------------------- + + # ----------------------------------------------------------------------------- + # compute_nullable_nonterminals() + # + # Creates a set containing all of the non-terminals that might produce + # an empty production. + # ----------------------------------------------------------------------------- + + def compute_nullable_nonterminals(self): + nullable = set() + num_nullable = 0 + while True: + for p in self.grammar.Productions[1:]: + if p.len == 0: + nullable.add(p.name) + continue + for t in p.prod: + if t not in nullable: + break + else: + nullable.add(p.name) + if len(nullable) == num_nullable: + break + num_nullable = len(nullable) + return nullable + + # ----------------------------------------------------------------------------- + # find_nonterminal_transitions(C) + # + # Given a set of LR(0) items, this function finds all of the non-terminal + # transitions. These are transitions in which a dot appears immediately before + # a non-terminal. Returns a list of tuples of the form (state,N) where state + # is the state number and N is the nonterminal symbol. + # + # The input C is the set of LR(0) items.
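compute_nullable_nonterminals() above is the same kind of fixed point in miniature: a nonterminal is nullable if some production for it has a right-hand side consisting entirely of nullable symbols. A standalone sketch, not part of the patch, with made-up (name, rhs) productions:

# Fixed-point nullable computation mirroring compute_nullable_nonterminals().
productions = [
    ('opt_sign', []),                  # empty right-hand side: trivially nullable
    ('opt_sign', ['MINUS']),
    ('num',      ['opt_sign', 'digits']),
    ('digits',   ['DIGIT', 'digits']),
    ('digits',   []),
]
nullable = set()
while True:
    before = len(nullable)
    for name, rhs in productions:
        if all(sym in nullable for sym in rhs):   # vacuously true for []
            nullable.add(name)
    if len(nullable) == before:
        break
# nullable == {'opt_sign', 'digits', 'num'}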
+ # ----------------------------------------------------------------------------- + + def find_nonterminal_transitions(self, C): + trans = [] + for stateno, state in enumerate(C): + for p in state: + if p.lr_index < p.len - 1: + t = (stateno, p.prod[p.lr_index+1]) + if t[1] in self.grammar.Nonterminals: + if t not in trans: + trans.append(t) + return trans + + # ----------------------------------------------------------------------------- + # dr_relation() + # + # Computes the DR(p,A) relationships for non-terminal transitions. The input + # is a tuple (state,N) where state is a number and N is a nonterminal symbol. + # + # Returns a list of terminals. + # ----------------------------------------------------------------------------- + + def dr_relation(self, C, trans, nullable): + state, N = trans + terms = [] + + g = self.lr0_goto(C[state], N) + for p in g: + if p.lr_index < p.len - 1: + a = p.prod[p.lr_index+1] + if a in self.grammar.Terminals: + if a not in terms: + terms.append(a) + + # This extra bit is to handle the start state + if state == 0 and N == self.grammar.Productions[0].prod[0]: + terms.append('$end') + + return terms + + # ----------------------------------------------------------------------------- + # reads_relation() + # + # Computes the READS() relation (p,A) READS (t,C). + # ----------------------------------------------------------------------------- + + def reads_relation(self, C, trans, empty): + # Look for empty transitions + rel = [] + state, N = trans + + g = self.lr0_goto(C[state], N) + j = self.lr0_cidhash.get(id(g), -1) + for p in g: + if p.lr_index < p.len - 1: + a = p.prod[p.lr_index + 1] + if a in empty: + rel.append((j, a)) + + return rel + + # ----------------------------------------------------------------------------- + # compute_lookback_includes() + # + # Determines the lookback and includes relations + # + # LOOKBACK: + # + # This relation is determined by running the LR(0) state machine forward. + # For example, starting with a production "N : . A B C", we run it forward + # to obtain "N : A B C ." We then build a relationship between this final + # state and the starting state. These relationships are stored in a dictionary + # lookdict. + # + # INCLUDES: + # + # Computes the INCLUDE() relation (p,A) INCLUDES (p',B). + # + # This relation is used to determine non-terminal transitions that occur + # inside of other non-terminal transition states. (p,A) INCLUDES (p', B) + # if the following holds: + # + # B -> LAT, where T -> epsilon and p' -L-> p + # + # L is essentially a prefix (which may be empty), T is a suffix that must be + # able to derive an empty string. State p' must lead to state p with the string L. + # + # ----------------------------------------------------------------------------- + + def compute_lookback_includes(self, C, trans, nullable): + lookdict = {} # Dictionary of lookback relations + includedict = {} # Dictionary of include relations + + # Make a dictionary of non-terminal transitions + dtrans = {} + for t in trans: + dtrans[t] = 1 + + # Loop over all transitions and compute lookbacks and includes + for state, N in trans: + lookb = [] + includes = [] + for p in C[state]: + if p.name != N: + continue + + # Okay, we have a name match. We now follow the production all the way + # through the state machine until we get the . 
on the right hand side + + lr_index = p.lr_index + j = state + while lr_index < p.len - 1: + lr_index = lr_index + 1 + t = p.prod[lr_index] + + # Check to see if this symbol and state are a non-terminal transition + if (j, t) in dtrans: + # Yes. Okay, there is some chance that this is an includes relation + # the only way to know for certain is whether the rest of the + # production derives empty + + li = lr_index + 1 + while li < p.len: + if p.prod[li] in self.grammar.Terminals: + break # No forget it + if p.prod[li] not in nullable: + break + li = li + 1 + else: + # Appears to be a relation between (j,t) and (state,N) + includes.append((j, t)) - break # break out of the for s in K loop because we only want to make - # sure that a production is in the Kernel - + g = self.lr0_goto(C[j], t) # Go to next set + j = self.lr0_cidhash.get(id(g), -1) # Go to next state + + # When we get here, j is the final state, now we have to locate the production + for r in C[j]: + if r.name != p.name: + continue + if r.len != p.len: + continue + i = 0 + # This look is comparing a production ". A B C" with "A B C ." + while i < r.lr_index: + if r.prod[i] != p.prod[i+1]: + break + i = i + 1 else: - # We are at the end of a production. Reduce! - - for a in p.lookaheads[st]: - actlist.append((a,p,"reduce using rule %d (%s)" % (p.number,p))) - r = action.get((st,a),None) - if r is not None: - # Whoa. Have a shift/reduce or reduce/reduce conflict - if r > 0: - # Need to decide on shift or reduce here - # By default we favor shifting. Need to add - # some precedence rules here. - sprec,slevel = Productions[actionp[st,a].number].prec - rprec,rlevel = Precedence.get(a,('right',0)) - if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')): - # We really need to reduce here. - action[st,a] = -p.number - actionp[st,a] = p - if not slevel and not rlevel: - _vfc.write("shift/reduce conflict in state %d resolved as reduce.\n" % st) - _vf.write(" ! shift/reduce conflict for %s resolved as reduce.\n" % a) - n_srconflict += 1 - elif (slevel == rlevel) and (rprec == 'nonassoc'): - action[st,a] = None - else: - # Hmmm. Guess we'll keep the shift - if not slevel and not rlevel: - _vfc.write("shift/reduce conflict in state %d resolved as shift.\n" % st) - _vf.write(" ! shift/reduce conflict for %s resolved as shift.\n" % a) - n_srconflict +=1 - elif r < 0: - # Reduce/reduce conflict. In this case, we favor the rule - # that was defined first in the grammar file - oldp = Productions[-r] - pp = Productions[p.number] - if oldp.line > pp.line: - action[st,a] = -p.number - actionp[st,a] = p - # print "Reduce/reduce conflict in state %d" % st - n_rrconflict += 1 - _vfc.write("reduce/reduce conflict in state %d resolved using rule %d.\n" % (st, actionp[st,a].number)) - _vf.write(" ! reduce/reduce conflict for %s resolved using rule %d.\n" % (a,actionp[st,a].number)) - else: - print "Unknown conflict in state %d" % st + lookb.append((j, r)) + for i in includes: + if i not in includedict: + includedict[i] = [] + includedict[i].append((state, N)) + lookdict[(state, N)] = lookb + + return lookdict, includedict + + # ----------------------------------------------------------------------------- + # compute_read_sets() + # + # Given a set of LR(0) items, this function computes the read sets. 
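Both compute_read_sets() here and compute_follow_sets() below hand their work to the generic digraph()/traverse() pair defined earlier, which evaluates F(x) = F'(x) U U{F(y) | x R y} with strongly connected components collapsed. A toy invocation, not part of the patch (the relation and seed values are made up, and the import assumes this module is importable as yacc):

from yacc import digraph

X = ['a', 'b', 'c']                           # the input set
edges = {'a': ['b'], 'b': ['c'], 'c': []}     # the relation R, as adjacency lists
seed = {'a': ['1'], 'b': ['2'], 'c': ['3']}   # the base function F'(x)

F = digraph(X, R=lambda x: edges[x], FP=lambda x: list(seed[x]))
assert F == {'a': ['1', '2', '3'], 'b': ['2', '3'], 'c': ['3']}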
+ # + # Inputs: C = Set of LR(0) items + # ntrans = Set of nonterminal transitions + # nullable = Set of empty transitions + # + # Returns a set containing the read sets + # ----------------------------------------------------------------------------- + + def compute_read_sets(self, C, ntrans, nullable): + FP = lambda x: self.dr_relation(C, x, nullable) + R = lambda x: self.reads_relation(C, x, nullable) + F = digraph(ntrans, R, FP) + return F + + # ----------------------------------------------------------------------------- + # compute_follow_sets() + # + # Given a set of LR(0) items, a set of non-terminal transitions, a readset, + # and an include set, this function computes the follow sets + # + # Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)} + # + # Inputs: + # ntrans = Set of nonterminal transitions + # readsets = Readset (previously computed) + # inclsets = Include sets (previously computed) + # + # Returns a set containing the follow sets + # ----------------------------------------------------------------------------- + + def compute_follow_sets(self, ntrans, readsets, inclsets): + FP = lambda x: readsets[x] + R = lambda x: inclsets.get(x, []) + F = digraph(ntrans, R, FP) + return F + + # ----------------------------------------------------------------------------- + # add_lookaheads() + # + # Attaches the lookahead symbols to grammar rules. + # + # Inputs: lookbacks - Set of lookback relations + # followset - Computed follow set + # + # This function directly attaches the lookaheads to productions contained + # in the lookbacks set + # ----------------------------------------------------------------------------- + + def add_lookaheads(self, lookbacks, followset): + for trans, lb in lookbacks.items(): + # Loop over productions in lookback + for state, p in lb: + if state not in p.lookaheads: + p.lookaheads[state] = [] + f = followset.get(trans, []) + for a in f: + if a not in p.lookaheads[state]: + p.lookaheads[state].append(a) + + # ----------------------------------------------------------------------------- + # add_lalr_lookaheads() + # + # This function does all of the work of adding lookahead information for use + # with LALR parsing + # ----------------------------------------------------------------------------- + + def add_lalr_lookaheads(self, C): + # Determine all of the nullable nonterminals + nullable = self.compute_nullable_nonterminals() + + # Find all non-terminal transitions + trans = self.find_nonterminal_transitions(C) + + # Compute read sets + readsets = self.compute_read_sets(C, trans, nullable) + + # Compute lookback/includes relations + lookd, included = self.compute_lookback_includes(C, trans, nullable) + + # Compute LALR FOLLOW sets + followsets = self.compute_follow_sets(trans, readsets, included) + + # Add all of the lookaheads + self.add_lookaheads(lookd, followsets) + + # ----------------------------------------------------------------------------- + # lr_parse_table() + # + # This function constructs the parse tables for SLR or LALR + # ----------------------------------------------------------------------------- + def lr_parse_table(self): + Productions = self.grammar.Productions + Precedence = self.grammar.Precedence + goto = self.lr_goto # Goto array + action = self.lr_action # Action array + log = self.log # Logger for output + + actionp = {} # Action production array (temporary) + + log.info('Parsing method: %s', self.lr_method) + + # Step 1: Construct C = { I0, I1, ... 
IN}, collection of LR(0) items + # This determines the number of states + + C = self.lr0_items() + + if self.lr_method == 'LALR': + self.add_lalr_lookaheads(C) + + # Build the parser table, state by state + st = 0 + for I in C: + # Loop over each production in I + actlist = [] # List of actions + st_action = {} + st_actionp = {} + st_goto = {} + log.info('') + log.info('state %d', st) + log.info('') + for p in I: + log.info(' (%d) %s', p.number, p) + log.info('') + + for p in I: + if p.len == p.lr_index + 1: + if p.name == "S'": + # Start symbol. Accept! + st_action['$end'] = 0 + st_actionp['$end'] = p + else: + # We are at the end of a production. Reduce! + if self.lr_method == 'LALR': + laheads = p.lookaheads[st] else: - action[st,a] = -p.number - actionp[st,a] = p - else: - i = p.lr_index - a = p.prod[i+1] # Get symbol right after the "." - if Terminals.has_key(a): - g = goto_cache[(idI,a)] - j = cid_hash.get(id(g),-1) - if j >= 0: - # We are in a shift state - _k = (a,j) - if not acthash.has_key(_k): - actlist.append((a,p,"shift and go to state %d" % j)) - acthash[_k] = 1 - r = action.get((st,a),None) - if r is not None: - # Whoa have a shift/reduce or shift/shift conflict - if r > 0: - if r != j: - sys.stderr.write("Shift/shift conflict in state %d\n" % st) - elif r < 0: - # Do a precedence check. - # - if precedence of reduce rule is higher, we reduce. - # - if precedence of reduce is same and left assoc, we reduce. - # - otherwise we shift - rprec,rlevel = Productions[actionp[st,a].number].prec - sprec,slevel = Precedence.get(a,('right',0)) - if (slevel > rlevel) or ((slevel == rlevel) and (rprec != 'left')): - # We decide to shift here... highest precedence to shift - action[st,a] = j - actionp[st,a] = p - if not slevel and not rlevel: - n_srconflict += 1 - _vfc.write("shift/reduce conflict in state %d resolved as shift.\n" % st) - _vf.write(" ! shift/reduce conflict for %s resolved as shift.\n" % a) - elif (slevel == rlevel) and (rprec == 'nonassoc'): - action[st,a] = None - else: - # Hmmm. Guess we'll keep the reduce - if not slevel and not rlevel: - n_srconflict +=1 - _vfc.write("shift/reduce conflict in state %d resolved as reduce.\n" % st) - _vf.write(" ! shift/reduce conflict for %s resolved as reduce.\n" % a) - + laheads = self.grammar.Follow[p.name] + for a in laheads: + actlist.append((a, p, 'reduce using rule %d (%s)' % (p.number, p))) + r = st_action.get(a) + if r is not None: + # Whoa. Have a shift/reduce or reduce/reduce conflict + if r > 0: + # Need to decide on shift or reduce here + # By default we favor shifting. Need to add + # some precedence rules here. + + # Shift precedence comes from the token + sprec, slevel = Precedence.get(a, ('right', 0)) + + # Reduce precedence comes from rule being reduced (p) + rprec, rlevel = Productions[p.number].prec + + if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')): + # We really need to reduce here. + st_action[a] = -p.number + st_actionp[a] = p + if not slevel and not rlevel: + log.info(' ! shift/reduce conflict for %s resolved as reduce', a) + self.sr_conflicts.append((st, a, 'reduce')) + Productions[p.number].reduced += 1 + elif (slevel == rlevel) and (rprec == 'nonassoc'): + st_action[a] = None + else: + # Hmmm. Guess we'll keep the shift + if not rlevel: + log.info(' ! shift/reduce conflict for %s resolved as shift', a) + self.sr_conflicts.append((st, a, 'shift')) + elif r < 0: + # Reduce/reduce conflict. 
In this case, we favor the rule + # that was defined first in the grammar file + oldp = Productions[-r] + pp = Productions[p.number] + if oldp.line > pp.line: + st_action[a] = -p.number + st_actionp[a] = p + chosenp, rejectp = pp, oldp + Productions[p.number].reduced += 1 + Productions[oldp.number].reduced -= 1 + else: + chosenp, rejectp = oldp, pp + self.rr_conflicts.append((st, chosenp, rejectp)) + log.info(' ! reduce/reduce conflict for %s resolved using rule %d (%s)', + a, st_actionp[a].number, st_actionp[a]) + else: + raise LALRError('Unknown conflict in state %d' % st) else: - sys.stderr.write("Unknown conflict in state %d\n" % st) - else: - action[st,a] = j - actionp[st,a] = p + st_action[a] = -p.number + st_actionp[a] = p + Productions[p.number].reduced += 1 else: - nonterminal = a - term_list = TReductions[nonterminal] - # DB: This loop gets executed a lot. Try to optimize - for a in term_list: - g = goto_cache[(idI,a)] - j = cid_hash[id(g)] + i = p.lr_index + a = p.prod[i+1] # Get symbol right after the "." + if a in self.grammar.Terminals: + g = self.lr0_goto(I, a) + j = self.lr0_cidhash.get(id(g), -1) if j >= 0: # We are in a shift state - # Don't put repeated shift actions on action list (performance hack) - _k = (a,j) - if not acthash.has_key(_k): - actlist.append((a,p,"shift and go to state "+str(j))) - acthash[_k] = 1 - - r = action.get((st,a),None) + actlist.append((a, p, 'shift and go to state %d' % j)) + r = st_action.get(a) if r is not None: # Whoa have a shift/reduce or shift/shift conflict if r > 0: if r != j: - sys.stderr.write("Shift/shift conflict in state %d\n" % st) - continue + raise LALRError('Shift/shift conflict in state %d' % st) elif r < 0: # Do a precedence check. # - if precedence of reduce rule is higher, we reduce. # - if precedence of reduce is same and left assoc, we reduce. # - otherwise we shift - rprec,rlevel = Productions[actionp[st,a].number].prec - sprec,slevel = Precedence.get(a,('right',0)) - if (slevel > rlevel) or ((slevel == rlevel) and (rprec != 'left')): + + # Shift precedence comes from the token + sprec, slevel = Precedence.get(a, ('right', 0)) + + # Reduce precedence comes from the rule that could have been reduced + rprec, rlevel = Productions[st_actionp[a].number].prec + + if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')): # We decide to shift here... highest precedence to shift - action[st,a] = j - actionp[st,a] = p - if not slevel and not rlevel: - n_srconflict += 1 - _vfc.write("shift/reduce conflict in state %d resolved as shift.\n" % st) - _vf.write(" ! shift/reduce conflict for %s resolved as shift.\n" % a) + Productions[st_actionp[a].number].reduced -= 1 + st_action[a] = j + st_actionp[a] = p + if not rlevel: + log.info(' ! shift/reduce conflict for %s resolved as shift', a) + self.sr_conflicts.append((st, a, 'shift')) elif (slevel == rlevel) and (rprec == 'nonassoc'): - action[st,a] = None - else: + st_action[a] = None + else: # Hmmm. Guess we'll keep the reduce if not slevel and not rlevel: - n_srconflict +=1 - _vfc.write("shift/reduce conflict in state %d resolved as reduce.\n" % st) - _vf.write(" ! shift/reduce conflict for %s resolved as reduce.\n" % a) - + log.info(' ! 
shift/reduce conflict for %s resolved as reduce', a) + self.sr_conflicts.append((st, a, 'reduce')) + else: - sys.stderr.write("Unknown conflict in state %d\n" % st) + raise LALRError('Unknown conflict in state %d' % st) else: - action[st,a] = j - actionp[st,a] = p - - except StandardError,e: - raise YaccError, "Hosed in lalr_parse_table", e - - # Print the actions associated with each terminal - if yaccdebug: - for a,p,m in actlist: - if action.has_key((st,a)): - if p is actionp[st,a]: - _vf.write(" %-15s %s\n" % (a,m)) - _vf.write("\n") - - for a,p,m in actlist: - if action.has_key((st,a)): - if p is not actionp[st,a]: - _vf.write(" ! %-15s [ %s ]\n" % (a,m)) - - # Construct the goto table for this state - nkeys = { } - for ii in I: - for s in ii.usyms: - if Nonterminals.has_key(s): - nkeys[s] = None - - # Construct the goto table for this state - for n in nkeys.keys(): - g = lr0_goto(I,n) - j = cid_hash.get(id(g),-1) - if j >= 0: - goto[st,n] = j - if yaccdebug: - _vf.write(" %-30s shift and go to state %d\n" % (n,j)) - - st += 1 - if yaccdebug: - if n_srconflict == 1: - sys.stderr.write("yacc: %d shift/reduce conflict\n" % n_srconflict) - if n_srconflict > 1: - sys.stderr.write("yacc: %d shift/reduce conflicts\n" % n_srconflict) - if n_rrconflict == 1: - sys.stderr.write("yacc: %d reduce/reduce conflict\n" % n_rrconflict) - if n_rrconflict > 1: - sys.stderr.write("yacc: %d reduce/reduce conflicts\n" % n_rrconflict) - - -# ----------------------------------------------------------------------------- -# ==== LR Utility functions ==== -# ----------------------------------------------------------------------------- - -# ----------------------------------------------------------------------------- -# _lr_write_tables() -# -# This function writes the LR parsing tables to a file -# ----------------------------------------------------------------------------- - -def lr_write_tables(modulename=tab_module,outputdir=''): - filename = os.path.join(outputdir,modulename) + ".py" - try: - f = open(filename,"w") + st_action[a] = j + st_actionp[a] = p + + # Print the actions associated with each terminal + _actprint = {} + for a, p, m in actlist: + if a in st_action: + if p is st_actionp[a]: + log.info(' %-15s %s', a, m) + _actprint[(a, m)] = 1 + log.info('') + # Print the actions that were not used. (debugging) + not_used = 0 + for a, p, m in actlist: + if a in st_action: + if p is not st_actionp[a]: + if not (a, m) in _actprint: + log.debug(' ! 
%-15s [ %s ]', a, m) + not_used = 1 + _actprint[(a, m)] = 1 + if not_used: + log.debug('') + + # Construct the goto table for this state + + nkeys = {} + for ii in I: + for s in ii.usyms: + if s in self.grammar.Nonterminals: + nkeys[s] = None + for n in nkeys: + g = self.lr0_goto(I, n) + j = self.lr0_cidhash.get(id(g), -1) + if j >= 0: + st_goto[n] = j + log.info(' %-30s shift and go to state %d', n, j) + + action[st] = st_action + actionp[st] = st_actionp + goto[st] = st_goto + st += 1 + + # ----------------------------------------------------------------------------- + # write() + # + # This function writes the LR parsing tables to a file + # ----------------------------------------------------------------------------- + + def write_table(self, tabmodule, outputdir='', signature=''): + if isinstance(tabmodule, types.ModuleType): + raise IOError("Won't overwrite existing tabmodule") + + basemodulename = tabmodule.split('.')[-1] + filename = os.path.join(outputdir, basemodulename) + '.py' + try: + f = open(filename, 'w') - f.write(""" + f.write(''' # %s # This file is automatically generated. Do not edit. +# pylint: disable=W,C,R +_tabversion = %r + +_lr_method = %r -_lr_method = %s - -_lr_signature = %s -""" % (filename, repr(_lr_method), repr(Signature.digest()))) - - # Change smaller to 0 to go back to original tables - smaller = 1 - - # Factor out names to try and make smaller - if smaller: - items = { } - - for k,v in _lr_action.items(): - i = items.get(k[1]) - if not i: - i = ([],[]) - items[k[1]] = i - i[0].append(k[0]) - i[1].append(v) - - f.write("\n_lr_action_items = {") - for k,v in items.items(): - f.write("%r:([" % k) - for i in v[0]: - f.write("%r," % i) - f.write("],[") - for i in v[1]: - f.write("%r," % i) - - f.write("]),") - f.write("}\n") - - f.write(""" -_lr_action = { } +_lr_signature = %r + ''' % (os.path.basename(filename), __tabversion__, self.lr_method, signature)) + + # Change smaller to 0 to go back to original tables + smaller = 1 + + # Factor out names to try and make smaller + if smaller: + items = {} + + for s, nd in self.lr_action.items(): + for name, v in nd.items(): + i = items.get(name) + if not i: + i = ([], []) + items[name] = i + i[0].append(s) + i[1].append(v) + + f.write('\n_lr_action_items = {') + for k, v in items.items(): + f.write('%r:([' % k) + for i in v[0]: + f.write('%r,' % i) + f.write('],[') + for i in v[1]: + f.write('%r,' % i) + + f.write(']),') + f.write('}\n') + + f.write(''' +_lr_action = {} for _k, _v in _lr_action_items.items(): for _x,_y in zip(_v[0],_v[1]): - _lr_action[(_x,_k)] = _y + if not _x in _lr_action: _lr_action[_x] = {} + _lr_action[_x][_k] = _y del _lr_action_items -""") - - else: - f.write("\n_lr_action = { "); - for k,v in _lr_action.items(): - f.write("(%r,%r):%r," % (k[0],k[1],v)) - f.write("}\n"); +''') - if smaller: - # Factor out names to try and make smaller - items = { } - - for k,v in _lr_goto.items(): - i = items.get(k[1]) - if not i: - i = ([],[]) - items[k[1]] = i - i[0].append(k[0]) - i[1].append(v) - - f.write("\n_lr_goto_items = {") - for k,v in items.items(): - f.write("%r:([" % k) - for i in v[0]: - f.write("%r," % i) - f.write("],[") - for i in v[1]: - f.write("%r," % i) - - f.write("]),") - f.write("}\n") - - f.write(""" -_lr_goto = { } + else: + f.write('\n_lr_action = { ') + for k, v in self.lr_action.items(): + f.write('(%r,%r):%r,' % (k[0], k[1], v)) + f.write('}\n') + + if smaller: + # Factor out names to try and make smaller + items = {} + + for s, nd in self.lr_goto.items(): + for name, v in 
nd.items(): + i = items.get(name) + if not i: + i = ([], []) + items[name] = i + i[0].append(s) + i[1].append(v) + + f.write('\n_lr_goto_items = {') + for k, v in items.items(): + f.write('%r:([' % k) + for i in v[0]: + f.write('%r,' % i) + f.write('],[') + for i in v[1]: + f.write('%r,' % i) + + f.write(']),') + f.write('}\n') + + f.write(''' +_lr_goto = {} for _k, _v in _lr_goto_items.items(): - for _x,_y in zip(_v[0],_v[1]): - _lr_goto[(_x,_k)] = _y + for _x, _y in zip(_v[0], _v[1]): + if not _x in _lr_goto: _lr_goto[_x] = {} + _lr_goto[_x][_k] = _y del _lr_goto_items -""") - else: - f.write("\n_lr_goto = { "); - for k,v in _lr_goto.items(): - f.write("(%r,%r):%r," % (k[0],k[1],v)) - f.write("}\n"); - - # Write production table - f.write("_lr_productions = [\n") - for p in Productions: - if p: - if (p.func): - f.write(" (%r,%d,%r,%r,%d),\n" % (p.name, p.len, p.func.__name__,p.file,p.line)) - else: - f.write(" (%r,%d,None,None,None),\n" % (p.name, p.len)) +''') else: - f.write(" None,\n") - f.write("]\n") - f.close() + f.write('\n_lr_goto = { ') + for k, v in self.lr_goto.items(): + f.write('(%r,%r):%r,' % (k[0], k[1], v)) + f.write('}\n') + + # Write production table + f.write('_lr_productions = [\n') + for p in self.lr_productions: + if p.func: + f.write(' (%r,%r,%d,%r,%r,%d),\n' % (p.str, p.name, p.len, + p.func, os.path.basename(p.file), p.line)) + else: + f.write(' (%r,%r,%d,None,None,None),\n' % (str(p), p.name, p.len)) + f.write(']\n') + f.close() - except IOError,e: - print "Unable to create '%s'" % filename - print e - return + except IOError as e: + raise -def lr_read_tables(module=tab_module,optimize=0): - global _lr_action, _lr_goto, _lr_productions, _lr_method - try: - exec "import %s as parsetab" % module - - if (optimize) or (Signature.digest() == parsetab._lr_signature): - _lr_action = parsetab._lr_action - _lr_goto = parsetab._lr_goto - _lr_productions = parsetab._lr_productions - _lr_method = parsetab._lr_method - return 1 - else: - return 0 - - except (ImportError,AttributeError): - return 0 + + # ----------------------------------------------------------------------------- + # pickle_table() + # + # This function pickles the LR parsing tables to a supplied file object + # ----------------------------------------------------------------------------- + + def pickle_table(self, filename, signature=''): + try: + import cPickle as pickle + except ImportError: + import pickle + with open(filename, 'wb') as outf: + pickle.dump(__tabversion__, outf, pickle_protocol) + pickle.dump(self.lr_method, outf, pickle_protocol) + pickle.dump(signature, outf, pickle_protocol) + pickle.dump(self.lr_action, outf, pickle_protocol) + pickle.dump(self.lr_goto, outf, pickle_protocol) + + outp = [] + for p in self.lr_productions: + if p.func: + outp.append((p.str, p.name, p.len, p.func, os.path.basename(p.file), p.line)) + else: + outp.append((str(p), p.name, p.len, None, None, None)) + pickle.dump(outp, outf, pickle_protocol) # ----------------------------------------------------------------------------- -# yacc(module) +# === INTROSPECTION === # -# Build the parser module +# The following functions and classes are used to implement the PLY +# introspection features followed by the yacc() function itself. 
# ----------------------------------------------------------------------------- -def yacc(method=default_lr, debug=yaccdebug, module=None, tabmodule=tab_module, start=None, check_recursion=1, optimize=0,write_tables=1,debugfile=debug_file,outputdir=''): - global yaccdebug - yaccdebug = debug - - initialize_vars() - files = { } - error = 0 - - # Add starting symbol to signature - if start: - Signature.update(start) - - # Add parsing method to signature - Signature.update(method) - - # If a "module" parameter was supplied, extract its dictionary. - # Note: a module may in fact be an instance as well. - - if module: - # User supplied a module object. - if isinstance(module, types.ModuleType): - ldict = module.__dict__ - elif isinstance(module, types.InstanceType): - _items = [(k,getattr(module,k)) for k in dir(module)] - ldict = { } - for i in _items: - ldict[i[0]] = i[1] - else: - raise ValueError,"Expected a module" - - else: - # No module given. We might be able to get information from the caller. - # Throw an exception and unwind the traceback to get the globals - +# ----------------------------------------------------------------------------- +# get_caller_module_dict() +# +# This function returns a dictionary containing all of the symbols defined within +# a caller further down the call stack. This is used to get the environment +# associated with the yacc() call if none was provided. +# ----------------------------------------------------------------------------- + +def get_caller_module_dict(levels): + f = sys._getframe(levels) + ldict = f.f_globals.copy() + if f.f_globals != f.f_locals: + ldict.update(f.f_locals) + return ldict + +# ----------------------------------------------------------------------------- +# parse_grammar() +# +# This takes a raw grammar rule string and parses it into production data +# ----------------------------------------------------------------------------- +def parse_grammar(doc, file, line): + grammar = [] + # Split the doc string into lines + pstrings = doc.splitlines() + lastp = None + dline = line + for ps in pstrings: + dline += 1 + p = ps.split() + if not p: + continue try: - raise RuntimeError - except RuntimeError: - e,b,t = sys.exc_info() - f = t.tb_frame - f = f.f_back # Walk out to our calling function - ldict = f.f_globals # Grab its globals dictionary - - # If running in optimized mode. We're going to - - if (optimize and lr_read_tables(tabmodule,1)): - # Read parse table - del Productions[:] - for p in _lr_productions: - if not p: - Productions.append(None) + if p[0] == '|': + # This is a continuation of a previous rule + if not lastp: + raise SyntaxError("%s:%d: Misplaced '|'" % (file, dline)) + prodname = lastp + syms = p[1:] else: - m = MiniProduction() - m.name = p[0] - m.len = p[1] - m.file = p[3] - m.line = p[4] - if p[2]: - m.func = ldict[p[2]] - Productions.append(m) - - else: - # Get the tokens map - if (module and isinstance(module,types.InstanceType)): - tokens = getattr(module,"tokens",None) - else: - tokens = ldict.get("tokens",None) - - if not tokens: - raise YaccError,"module does not define a list 'tokens'" - if not (isinstance(tokens,types.ListType) or isinstance(tokens,types.TupleType)): - raise YaccError,"tokens must be a list or tuple." + prodname = p[0] + lastp = prodname + syms = p[2:] + assign = p[1] + if assign != ':' and assign != '::=': + raise SyntaxError("%s:%d: Syntax error. Expected ':'" % (file, dline)) - # Check to see if a requires dictionary is defined. 
- requires = ldict.get("require",None) - if requires: - if not (isinstance(requires,types.DictType)): - raise YaccError,"require must be a dictionary." + grammar.append((file, dline, prodname, syms)) + except SyntaxError: + raise + except Exception: + raise SyntaxError('%s:%d: Syntax error in rule %r' % (file, dline, ps.strip())) - for r,v in requires.items(): - try: - if not (isinstance(v,types.ListType)): - raise TypeError - v1 = [x.split(".") for x in v] - Requires[r] = v1 - except StandardError: - print "Invalid specification for rule '%s' in require. Expected a list of strings" % r - - - # Build the dictionary of terminals. We a record a 0 in the - # dictionary to track whether or not a terminal is actually - # used in the grammar - - if 'error' in tokens: - print "yacc: Illegal token 'error'. Is a reserved word." - raise YaccError,"Illegal token name" - - for n in tokens: - if Terminals.has_key(n): - print "yacc: Warning. Token '%s' multiply defined." % n - Terminals[n] = [ ] - - Terminals['error'] = [ ] - - # Get the precedence map (if any) - prec = ldict.get("precedence",None) - if prec: - if not (isinstance(prec,types.ListType) or isinstance(prec,types.TupleType)): - raise YaccError,"precedence must be a list or tuple." - add_precedence(prec) - Signature.update(repr(prec)) - - for n in tokens: - if not Precedence.has_key(n): - Precedence[n] = ('right',0) # Default, right associative, 0 precedence - - # Look for error handler - ef = ldict.get('p_error',None) - if ef: - if isinstance(ef,types.FunctionType): + return grammar + +# ----------------------------------------------------------------------------- +# ParserReflect() +# +# This class represents information extracted for building a parser including +# start symbol, error function, tokens, precedence list, action functions, +# etc. +# ----------------------------------------------------------------------------- +class ParserReflect(object): + def __init__(self, pdict, log=None): + self.pdict = pdict + self.start = None + self.error_func = None + self.tokens = None + self.modules = set() + self.grammar = [] + self.error = False + + if log is None: + self.log = PlyLogger(sys.stderr) + else: + self.log = log + + # Get all of the basic information + def get_all(self): + self.get_start() + self.get_error_func() + self.get_tokens() + self.get_precedence() + self.get_pfunctions() + + # Validate all of the information + def validate_all(self): + self.validate_start() + self.validate_error_func() + self.validate_tokens() + self.validate_precedence() + self.validate_pfunctions() + self.validate_modules() + return self.error + + # Compute a signature over the grammar + def signature(self): + parts = [] + try: + if self.start: + parts.append(self.start) + if self.prec: + parts.append(''.join([''.join(p) for p in self.prec])) + if self.tokens: + parts.append(' '.join(self.tokens)) + for f in self.pfuncs: + if f[3]: + parts.append(f[3]) + except (TypeError, ValueError): + pass + return ''.join(parts) + + # ----------------------------------------------------------------------------- + # validate_modules() + # + # This method checks to see if there are duplicated p_rulename() functions + # in the parser module file. Without this function, it is really easy for + # users to make mistakes by cutting and pasting code fragments (and it's a real + # bugger to try and figure out why the resulting parser doesn't work). Therefore, + # we just do a little regular expression pattern matching of def statements + # to try and detect duplicates. 
+ # ----------------------------------------------------------------------------- + + def validate_modules(self): + # Match def p_funcname( + fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(') + + for module in self.modules: + try: + lines, linen = inspect.getsourcelines(module) + except IOError: + continue + + counthash = {} + for linen, line in enumerate(lines): + linen += 1 + m = fre.match(line) + if m: + name = m.group(1) + prev = counthash.get(name) + if not prev: + counthash[name] = linen + else: + filename = inspect.getsourcefile(module) + self.log.warning('%s:%d: Function %s redefined. Previously defined on line %d', + filename, linen, name, prev) + + # Get the start symbol + def get_start(self): + self.start = self.pdict.get('start') + + # Validate the start symbol + def validate_start(self): + if self.start is not None: + if not isinstance(self.start, string_types): + self.log.error("'start' must be a string") + + # Look for error handler + def get_error_func(self): + self.error_func = self.pdict.get('p_error') + + # Validate the error function + def validate_error_func(self): + if self.error_func: + if isinstance(self.error_func, types.FunctionType): ismethod = 0 - elif isinstance(ef, types.MethodType): + elif isinstance(self.error_func, types.MethodType): ismethod = 1 else: - raise YaccError,"'p_error' defined, but is not a function or method." - eline = ef.func_code.co_firstlineno - efile = ef.func_code.co_filename - files[efile] = None - - if (ef.func_code.co_argcount != 1+ismethod): - raise YaccError,"%s:%d: p_error() requires 1 argument." % (efile,eline) - global Errorfunc - Errorfunc = ef - else: - print "yacc: Warning. no p_error() function is defined." - - # Get the list of built-in functions with p_ prefix - symbols = [ldict[f] for f in ldict.keys() - if (type(ldict[f]) in (types.FunctionType, types.MethodType) and ldict[f].__name__[:2] == 'p_' - and ldict[f].__name__ != 'p_error')] + self.log.error("'p_error' defined, but is not a function or method") + self.error = True + return + + eline = self.error_func.__code__.co_firstlineno + efile = self.error_func.__code__.co_filename + module = inspect.getmodule(self.error_func) + self.modules.add(module) + + argcount = self.error_func.__code__.co_argcount - ismethod + if argcount != 1: + self.log.error('%s:%d: p_error() requires 1 argument', efile, eline) + self.error = True + + # Get the tokens map + def get_tokens(self): + tokens = self.pdict.get('tokens') + if not tokens: + self.log.error('No token list is defined') + self.error = True + return + if not isinstance(tokens, (list, tuple)): + self.log.error('tokens must be a list or tuple') + self.error = True + return + + if not tokens: + self.log.error('tokens is empty') + self.error = True + return + + self.tokens = sorted(tokens) + + # Validate the tokens + def validate_tokens(self): + # Validate the tokens. + if 'error' in self.tokens: + self.log.error("Illegal token name 'error'. 
Is a reserved word") + self.error = True + return + + terminals = set() + for n in self.tokens: + if n in terminals: + self.log.warning('Token %r multiply defined', n) + terminals.add(n) + + # Get the precedence map (if any) + def get_precedence(self): + self.prec = self.pdict.get('precedence') + + # Validate and parse the precedence map + def validate_precedence(self): + preclist = [] + if self.prec: + if not isinstance(self.prec, (list, tuple)): + self.log.error('precedence must be a list or tuple') + self.error = True + return + for level, p in enumerate(self.prec): + if not isinstance(p, (list, tuple)): + self.log.error('Bad precedence table') + self.error = True + return + + if len(p) < 2: + self.log.error('Malformed precedence entry %s. Must be (assoc, term, ..., term)', p) + self.error = True + return + assoc = p[0] + if not isinstance(assoc, string_types): + self.log.error('precedence associativity must be a string') + self.error = True + return + for term in p[1:]: + if not isinstance(term, string_types): + self.log.error('precedence items must be strings') + self.error = True + return + preclist.append((term, assoc, level+1)) + self.preclist = preclist + + # Get all p_functions from the grammar + def get_pfunctions(self): + p_functions = [] + for name, item in self.pdict.items(): + if not name.startswith('p_') or name == 'p_error': + continue + if isinstance(item, (types.FunctionType, types.MethodType)): + line = getattr(item, 'co_firstlineno', item.__code__.co_firstlineno) + module = inspect.getmodule(item) + p_functions.append((line, module, name, item.__doc__)) + + # Sort all of the actions by line number; make sure to stringify + # modules to make them sortable, since `line` may not uniquely sort all + # p functions + p_functions.sort(key=lambda p_function: ( + p_function[0], + str(p_function[1]), + p_function[2], + p_function[3])) + self.pfuncs = p_functions + + # Validate all of the p_functions + def validate_pfunctions(self): + grammar = [] # Check for non-empty symbols - if len(symbols) == 0: - raise YaccError,"no rules of the form p_rulename are defined." - - # Sort the symbols by line number - symbols.sort(lambda x,y: cmp(x.func_code.co_firstlineno,y.func_code.co_firstlineno)) - - # Add all of the symbols to the grammar - for f in symbols: - if (add_function(f)) < 0: - error += 1 + if len(self.pfuncs) == 0: + self.log.error('no rules of the form p_rulename are defined') + self.error = True + return + + for line, module, name, doc in self.pfuncs: + file = inspect.getsourcefile(module) + func = self.pdict[name] + if isinstance(func, types.MethodType): + reqargs = 2 else: - files[f.func_code.co_filename] = None - - # Make a signature of the docstrings - for f in symbols: - if f.__doc__: - Signature.update(f.__doc__) - - lr_init_vars() - - if error: - raise YaccError,"Unable to construct parser." - - if not lr_read_tables(tabmodule): - - # Validate files - for filename in files.keys(): - if not validate_file(filename): - error = 1 - - # Validate dictionary - validate_dict(ldict) - - if start and not Prodnames.has_key(start): - raise YaccError,"Bad starting symbol '%s'" % start - - augment_grammar(start) - error = verify_productions(cycle_check=check_recursion) - otherfunc = [ldict[f] for f in ldict.keys() - if (type(f) in (types.FunctionType,types.MethodType) and ldict[f].__name__[:2] != 'p_')] - - if error: - raise YaccError,"Unable to construct parser." 
- - build_lritems() - compute_first1() - compute_follow(start) - - if method == 'SLR': - slr_parse_table() - elif method == 'LALR': - lalr_parse_table() + reqargs = 1 + if func.__code__.co_argcount > reqargs: + self.log.error('%s:%d: Rule %r has too many arguments', file, line, func.__name__) + self.error = True + elif func.__code__.co_argcount < reqargs: + self.log.error('%s:%d: Rule %r requires an argument', file, line, func.__name__) + self.error = True + elif not func.__doc__: + self.log.warning('%s:%d: No documentation string specified in function %r (ignored)', + file, line, func.__name__) else: - raise YaccError, "Unknown parsing method '%s'" % method - - if write_tables: - lr_write_tables(tabmodule,outputdir) - - if yaccdebug: try: - f = open(os.path.join(outputdir,debugfile),"w") - f.write(_vfc.getvalue()) - f.write("\n\n") - f.write(_vf.getvalue()) - f.close() - except IOError,e: - print "yacc: can't create '%s'" % debugfile,e - - # Made it here. Create a parser object and set up its internal state. - # Set global parse() method to bound method of parser object. - - p = Parser("xyzzy") - p.productions = Productions - p.errorfunc = Errorfunc - p.action = _lr_action - p.goto = _lr_goto - p.method = _lr_method - p.require = Requires + parsed_g = parse_grammar(doc, file, line) + for g in parsed_g: + grammar.append((name, g)) + except SyntaxError as e: + self.log.error(str(e)) + self.error = True + + # Looks like a valid grammar rule + # Mark the file in which defined. + self.modules.add(module) + + # Secondary validation step that looks for p_ definitions that are not functions + # or functions that look like they might be grammar rules. + + for n, v in self.pdict.items(): + if n.startswith('p_') and isinstance(v, (types.FunctionType, types.MethodType)): + continue + if n.startswith('t_'): + continue + if n.startswith('p_') and n != 'p_error': + self.log.warning('%r not defined as a function', n) + if ((isinstance(v, types.FunctionType) and v.__code__.co_argcount == 1) or + (isinstance(v, types.MethodType) and v.__func__.__code__.co_argcount == 2)): + if v.__doc__: + try: + doc = v.__doc__.split(' ') + if doc[1] == ':': + self.log.warning('%s:%d: Possible grammar rule %r defined without p_ prefix', + v.__code__.co_filename, v.__code__.co_firstlineno, n) + except IndexError: + pass + + self.grammar = grammar + +# ----------------------------------------------------------------------------- +# yacc(module) +# +# Build a parser +# ----------------------------------------------------------------------------- + +def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, start=None, + check_recursion=True, optimize=False, write_tables=True, debugfile=debug_file, + outputdir=None, debuglog=None, errorlog=None, picklefile=None): + + if tabmodule is None: + tabmodule = tab_module + # Reference to the parsing method of the last built parser global parse - parse = p.parse - - # Clean up all of the globals we created - if (not optimize): - yacc_cleanup() - return p - -# yacc_cleanup function. 
Delete all of the global variables -# used during table construction - -def yacc_cleanup(): - global _lr_action, _lr_goto, _lr_method, _lr_goto_cache - del _lr_action, _lr_goto, _lr_method, _lr_goto_cache - - global Productions, Prodnames, Prodmap, Terminals - global Nonterminals, First, Follow, Precedence, LRitems - global Errorfunc, Signature, Requires - global Prodempty, TReductions, NTReductions, GotoSetNum, Canonical - - del Productions, Prodnames, Prodmap, Terminals - del Nonterminals, First, Follow, Precedence, LRitems - del Errorfunc, Signature, Requires - del Prodempty, TReductions, NTReductions, GotoSetNum, Canonical - - global _vf, _vfc - del _vf, _vfc - - -# Stub that raises an error if parsing is attempted without first calling yacc() -def parse(*args,**kwargs): - raise YaccError, "yacc: No parser built with yacc()" + + # If pickling is enabled, table files are not created + if picklefile: + write_tables = 0 + + if errorlog is None: + errorlog = PlyLogger(sys.stderr) + + # Get the module dictionary used for the parser + if module: + _items = [(k, getattr(module, k)) for k in dir(module)] + pdict = dict(_items) + # If no __file__ or __package__ attributes are available, try to obtain them + # from the __module__ instead + if '__file__' not in pdict: + pdict['__file__'] = sys.modules[pdict['__module__']].__file__ + if '__package__' not in pdict and '__module__' in pdict: + if hasattr(sys.modules[pdict['__module__']], '__package__'): + pdict['__package__'] = sys.modules[pdict['__module__']].__package__ + else: + pdict = get_caller_module_dict(2) + + if outputdir is None: + # If no output directory is set, the location of the output files + # is determined according to the following rules: + # - If tabmodule specifies a package, files go into that package directory + # - Otherwise, files go in the same directory as the specifying module + if isinstance(tabmodule, types.ModuleType): + srcfile = tabmodule.__file__ + else: + if '.' not in tabmodule: + srcfile = pdict['__file__'] + else: + parts = tabmodule.split('.') + pkgname = '.'.join(parts[:-1]) + exec('import %s' % pkgname) + srcfile = getattr(sys.modules[pkgname], '__file__', '') + outputdir = os.path.dirname(srcfile) + + # Determine if the module is package of a package or not. + # If so, fix the tabmodule setting so that tables load correctly + pkg = pdict.get('__package__') + if pkg and isinstance(tabmodule, str): + if '.' not in tabmodule: + tabmodule = pkg + '.' + tabmodule + + + + # Set start symbol if it's specified directly using an argument + if start is not None: + pdict['start'] = start + + # Collect parser information from the dictionary + pinfo = ParserReflect(pdict, log=errorlog) + pinfo.get_all() + + if pinfo.error: + raise YaccError('Unable to build parser') + + # Check signature against table files (if any) + signature = pinfo.signature() + + # Read the tables + try: + lr = LRTable() + if picklefile: + read_signature = lr.read_pickle(picklefile) + else: + read_signature = lr.read_table(tabmodule) + if optimize or (read_signature == signature): + try: + lr.bind_callables(pinfo.pdict) + parser = LRParser(lr, pinfo.error_func) + parse = parser.parse + return parser + except Exception as e: + errorlog.warning('There was a problem loading the table file: %r', e) + except VersionError as e: + errorlog.warning(str(e)) + except ImportError: + pass + + if debuglog is None: + if debug: + try: + debuglog = PlyLogger(open(os.path.join(outputdir, debugfile), 'w')) + except IOError as e: + errorlog.warning("Couldn't open %r. 
%s" % (debugfile, e)) + debuglog = NullLogger() + else: + debuglog = NullLogger() + + debuglog.info('Created by PLY version %s (http://www.dabeaz.com/ply)', __version__) + + errors = False + + # Validate the parser information + if pinfo.validate_all(): + raise YaccError('Unable to build parser') + + if not pinfo.error_func: + errorlog.warning('no p_error() function is defined') + + # Create a grammar object + grammar = Grammar(pinfo.tokens) + + # Set precedence level for terminals + for term, assoc, level in pinfo.preclist: + try: + grammar.set_precedence(term, assoc, level) + except GrammarError as e: + errorlog.warning('%s', e) + + # Add productions to the grammar + for funcname, gram in pinfo.grammar: + file, line, prodname, syms = gram + try: + grammar.add_production(prodname, syms, funcname, file, line) + except GrammarError as e: + errorlog.error('%s', e) + errors = True + + # Set the grammar start symbols + try: + if start is None: + grammar.set_start(pinfo.start) + else: + grammar.set_start(start) + except GrammarError as e: + errorlog.error(str(e)) + errors = True + + if errors: + raise YaccError('Unable to build parser') + + # Verify the grammar structure + undefined_symbols = grammar.undefined_symbols() + for sym, prod in undefined_symbols: + errorlog.error('%s:%d: Symbol %r used, but not defined as a token or a rule', prod.file, prod.line, sym) + errors = True + + unused_terminals = grammar.unused_terminals() + if unused_terminals: + debuglog.info('') + debuglog.info('Unused terminals:') + debuglog.info('') + for term in unused_terminals: + errorlog.warning('Token %r defined, but not used', term) + debuglog.info(' %s', term) + + # Print out all productions to the debug log + if debug: + debuglog.info('') + debuglog.info('Grammar') + debuglog.info('') + for n, p in enumerate(grammar.Productions): + debuglog.info('Rule %-5d %s', n, p) + + # Find unused non-terminals + unused_rules = grammar.unused_rules() + for prod in unused_rules: + errorlog.warning('%s:%d: Rule %r defined, but not used', prod.file, prod.line, prod.name) + + if len(unused_terminals) == 1: + errorlog.warning('There is 1 unused token') + if len(unused_terminals) > 1: + errorlog.warning('There are %d unused tokens', len(unused_terminals)) + + if len(unused_rules) == 1: + errorlog.warning('There is 1 unused rule') + if len(unused_rules) > 1: + errorlog.warning('There are %d unused rules', len(unused_rules)) + + if debug: + debuglog.info('') + debuglog.info('Terminals, with rules where they appear') + debuglog.info('') + terms = list(grammar.Terminals) + terms.sort() + for term in terms: + debuglog.info('%-20s : %s', term, ' '.join([str(s) for s in grammar.Terminals[term]])) + + debuglog.info('') + debuglog.info('Nonterminals, with rules where they appear') + debuglog.info('') + nonterms = list(grammar.Nonterminals) + nonterms.sort() + for nonterm in nonterms: + debuglog.info('%-20s : %s', nonterm, ' '.join([str(s) for s in grammar.Nonterminals[nonterm]])) + debuglog.info('') + + if check_recursion: + unreachable = grammar.find_unreachable() + for u in unreachable: + errorlog.warning('Symbol %r is unreachable', u) + + infinite = grammar.infinite_cycles() + for inf in infinite: + errorlog.error('Infinite recursion detected for symbol %r', inf) + errors = True + + unused_prec = grammar.unused_precedence() + for term, assoc in unused_prec: + errorlog.error('Precedence rule %r defined for unknown symbol %r', assoc, term) + errors = True + + if errors: + raise YaccError('Unable to build parser') + + # Run the 
LRGeneratedTable on the grammar + if debug: + errorlog.debug('Generating %s tables', method) + + lr = LRGeneratedTable(grammar, method, debuglog) + + if debug: + num_sr = len(lr.sr_conflicts) + + # Report shift/reduce and reduce/reduce conflicts + if num_sr == 1: + errorlog.warning('1 shift/reduce conflict') + elif num_sr > 1: + errorlog.warning('%d shift/reduce conflicts', num_sr) + + num_rr = len(lr.rr_conflicts) + if num_rr == 1: + errorlog.warning('1 reduce/reduce conflict') + elif num_rr > 1: + errorlog.warning('%d reduce/reduce conflicts', num_rr) + + # Write out conflicts to the output file + if debug and (lr.sr_conflicts or lr.rr_conflicts): + debuglog.warning('') + debuglog.warning('Conflicts:') + debuglog.warning('') + + for state, tok, resolution in lr.sr_conflicts: + debuglog.warning('shift/reduce conflict for %s in state %d resolved as %s', tok, state, resolution) + + already_reported = set() + for state, rule, rejected in lr.rr_conflicts: + if (state, id(rule), id(rejected)) in already_reported: + continue + debuglog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule) + debuglog.warning('rejected rule (%s) in state %d', rejected, state) + errorlog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule) + errorlog.warning('rejected rule (%s) in state %d', rejected, state) + already_reported.add((state, id(rule), id(rejected))) + + warned_never = [] + for state, rule, rejected in lr.rr_conflicts: + if not rejected.reduced and (rejected not in warned_never): + debuglog.warning('Rule (%s) is never reduced', rejected) + errorlog.warning('Rule (%s) is never reduced', rejected) + warned_never.append(rejected) + + # Write the table file if requested + if write_tables: + try: + lr.write_table(tabmodule, outputdir, signature) + if tabmodule in sys.modules: + del sys.modules[tabmodule] + except IOError as e: + errorlog.warning("Couldn't create %r. %s" % (tabmodule, e)) + + # Write a pickled version of the tables + if picklefile: + try: + lr.pickle_table(picklefile, signature) + except IOError as e: + errorlog.warning("Couldn't create %r. 
%s" % (picklefile, e)) + + # Build the parser + lr.bind_callables(pinfo.pdict) + parser = LRParser(lr, pinfo.error_func) + + parse = parser.parse + return parser diff --git a/test/ccg2xml/arabic/arabic-grammar.xml b/test/ccg2xml/arabic/arabic-grammar.xml new file mode 100644 index 0000000..f8a4250 --- /dev/null +++ b/test/ccg2xml/arabic/arabic-grammar.xml @@ -0,0 +1,7 @@ + + + + + + + diff --git a/test/ccg2xml/arabic/arabic-lexicon.xml b/test/ccg2xml/arabic/arabic-lexicon.xml new file mode 100644 index 0000000..32569a6 --- /dev/null +++ b/test/ccg2xml/arabic/arabic-lexicon.xml @@ -0,0 +1,879 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/test/ccg2xml/arabic/arabic-morph.xml b/test/ccg2xml/arabic/arabic-morph.xml new file mode 100644 index 0000000..eac8625 --- /dev/null +++ b/test/ccg2xml/arabic/arabic-morph.xml @@ -0,0 +1,1282 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
diff --git a/test/ccg2xml/arabic/arabic-rules.xml b/test/ccg2xml/arabic/arabic-rules.xml new file mode 100644 index 0000000..a7be814 --- /dev/null +++ b/test/ccg2xml/arabic/arabic-rules.xml @@ -0,0 +1,145 @@
+ [145 lines of XML content lost in extraction]
diff --git a/test/ccg2xml/arabic/arabic-testbed.xml b/test/ccg2xml/arabic/arabic-testbed.xml new file mode 100644 index 0000000..6c17a76 --- /dev/null +++ b/test/ccg2xml/arabic/arabic-testbed.xml @@ -0,0 +1,46 @@
+ [46 lines of XML content lost in extraction]
diff --git a/test/ccg2xml/arabic/arabic-types.xml b/test/ccg2xml/arabic/arabic-types.xml new file mode 100644 index 0000000..8c02287 --- /dev/null +++ b/test/ccg2xml/arabic/arabic-types.xml @@ -0,0 +1,54 @@
+ [54 lines of XML content lost in extraction]
diff --git a/test/ccg2xml/diaspace/grammar.xml b/test/ccg2xml/diaspace/grammar.xml new file mode 100644 index 0000000..4723210 --- /dev/null +++ b/test/ccg2xml/diaspace/grammar.xml @@ -0,0 +1,32 @@
+ [32 lines of XML content lost in extraction]
diff --git a/test/ccg2xml/diaspace/lexicon.xml b/test/ccg2xml/diaspace/lexicon.xml new file mode 100644 index 0000000..1641503 --- /dev/null +++ b/test/ccg2xml/diaspace/lexicon.xml @@ -0,0 +1,12168 @@
+ [12168 lines of XML content lost in extraction]
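For completeness, a sketch of the caching paths exercised by the new write_table(), pickle_table(), and read_pickle() code above. Filenames and the table-module name are illustrative, and a grammar module like the toy one sketched earlier is assumed to be in scope:

import ply.yacc as yacc

# Write the generated table module under a custom name
# (the default tabmodule is 'parsetab'):
parser = yacc.yacc(tabmodule='ccg_parsetab', outputdir='.')

# Or pickle the tables instead; per the new yacc(), supplying a
# picklefile suppresses the generated Python table module entirely:
parser = yacc.yacc(picklefile='ccg_parsetab.pkl')

# On a rebuild, yacc() recomputes ParserReflect.signature() and reuses
# the cached tables only if it matches the stored _lr_signature.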
diff --git a/test/ccg2xml/diaspace/morph.xml b/test/ccg2xml/diaspace/morph.xml new file mode 100644 index 0000000..e295a5a --- /dev/null +++ b/test/ccg2xml/diaspace/morph.xml @@ -0,0 +1,1691 @@
+ [1691 lines of XML content lost in extraction]
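One more note on the yacc.py changes: validate_modules() exists because Python silently accepts a copy-pasted rule function with a duplicated name, keeping only the last definition. A hypothetical fragment of the mistake it warns about (rule names invented for illustration):

def p_np(p):
    'np : DET N'
    p[0] = ('np', p[2])

def p_np(p):
    # Same name: this silently shadows the rule above, so its action
    # would be lost; the new validate_modules() logs a warning here.
    'np : PRO'
    p[0] = ('np', p[1])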
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/test/ccg2xml/diaspace/rules.xml b/test/ccg2xml/diaspace/rules.xml new file mode 100644 index 0000000..bd29904 --- /dev/null +++ b/test/ccg2xml/diaspace/rules.xml @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/test/ccg2xml/diaspace/types.xml b/test/ccg2xml/diaspace/types.xml new file mode 100644 index 0000000..bfa1695 --- /dev/null +++ b/test/ccg2xml/diaspace/types.xml @@ -0,0 +1,799 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/test/ccg2xml/grammar_template/grammar_template-grammar.xml b/test/ccg2xml/grammar_template/grammar_template-grammar.xml new file mode 100644 index 0000000..b23d3c9 --- /dev/null +++ b/test/ccg2xml/grammar_template/grammar_template-grammar.xml @@ -0,0 +1,7 @@ + + + + + + + diff --git a/test/ccg2xml/grammar_template/grammar_template-lexicon.xml b/test/ccg2xml/grammar_template/grammar_template-lexicon.xml new file mode 100644 index 0000000..2eb2e35 --- /dev/null +++ b/test/ccg2xml/grammar_template/grammar_template-lexicon.xml @@ -0,0 +1,2 @@ + + diff --git a/test/ccg2xml/grammar_template/grammar_template-morph.xml b/test/ccg2xml/grammar_template/grammar_template-morph.xml new file mode 100644 index 0000000..3077cf8 --- /dev/null +++ b/test/ccg2xml/grammar_template/grammar_template-morph.xml @@ -0,0 +1,2 @@ + + diff --git a/test/ccg2xml/grammar_template/grammar_template-rules.xml b/test/ccg2xml/grammar_template/grammar_template-rules.xml new file mode 100644 index 0000000..5ef8eaa --- /dev/null +++ b/test/ccg2xml/grammar_template/grammar_template-rules.xml @@ -0,0 +1,15 @@ + + + + + + + + + + + + + + + diff --git a/test/ccg2xml/grammar_template/grammar_template-testbed.xml b/test/ccg2xml/grammar_template/grammar_template-testbed.xml new file mode 100644 index 0000000..a5c1e52 --- /dev/null +++ b/test/ccg2xml/grammar_template/grammar_template-testbed.xml @@ -0,0 +1,2 @@ + + diff --git a/test/ccg2xml/grammar_template/grammar_template-types.xml b/test/ccg2xml/grammar_template/grammar_template-types.xml new file mode 100644 index 0000000..3788554 --- /dev/null +++ b/test/ccg2xml/grammar_template/grammar_template-types.xml @@ -0,0 +1,2 @@ + + diff --git a/test/ccg2xml/inherit/inherit-grammar.xml b/test/ccg2xml/inherit/inherit-grammar.xml new file mode 100644 index 0000000..b7c07bc --- /dev/null +++ b/test/ccg2xml/inherit/inherit-grammar.xml @@ -0,0 +1,7 @@ + + + + + + + diff --git a/test/ccg2xml/inherit/inherit-lexicon.xml b/test/ccg2xml/inherit/inherit-lexicon.xml new file mode 100644 index 0000000..c9c6790 --- /dev/null +++ b/test/ccg2xml/inherit/inherit-lexicon.xml @@ -0,0 +1,264 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/test/ccg2xml/inherit/inherit-morph.xml b/test/ccg2xml/inherit/inherit-morph.xml new file mode 100644 index 0000000..8f1a3c8 --- /dev/null +++ b/test/ccg2xml/inherit/inherit-morph.xml @@ -0,0 +1,10 @@ + + + + + + + + + + diff --git a/test/ccg2xml/inherit/inherit-rules.xml b/test/ccg2xml/inherit/inherit-rules.xml new file mode 100644 index 0000000..192643d --- /dev/null +++ b/test/ccg2xml/inherit/inherit-rules.xml @@ -0,0 +1,15 @@ + + + + + + + + + + + + + + + diff --git a/test/ccg2xml/inherit/inherit-testbed.xml b/test/ccg2xml/inherit/inherit-testbed.xml new file mode 100644 index 0000000..3395633 --- /dev/null +++ b/test/ccg2xml/inherit/inherit-testbed.xml @@ -0,0 +1,7 @@ + + + + + + + diff --git a/test/ccg2xml/inherit/inherit-types.xml b/test/ccg2xml/inherit/inherit-types.xml new file mode 100644 index 0000000..fd41d5d --- /dev/null +++ b/test/ccg2xml/inherit/inherit-types.xml @@ -0,0 +1,6 @@ + + + + + + diff --git a/test/ccg2xml/tiny/tiny-grammar.xml b/test/ccg2xml/tiny/tiny-grammar.xml new file mode 100644 index 0000000..ea12de7 --- /dev/null +++ b/test/ccg2xml/tiny/tiny-grammar.xml @@ -0,0 +1,7 @@ + + + + + + + diff --git a/test/ccg2xml/tiny/tiny-lexicon.xml b/test/ccg2xml/tiny/tiny-lexicon.xml new file mode 100644 index 0000000..5668c3e --- /dev/null +++ b/test/ccg2xml/tiny/tiny-lexicon.xml @@ -0,0 +1,473 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/test/ccg2xml/tiny/tiny-morph.xml b/test/ccg2xml/tiny/tiny-morph.xml new file mode 100644 index 0000000..a8c2a46 --- /dev/null +++ b/test/ccg2xml/tiny/tiny-morph.xml @@ -0,0 +1,135 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/test/ccg2xml/tiny/tiny-rules.xml b/test/ccg2xml/tiny/tiny-rules.xml new file mode 100644 index 0000000..456cf11 --- /dev/null +++ b/test/ccg2xml/tiny/tiny-rules.xml @@ -0,0 +1,26 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/test/ccg2xml/tiny/tiny-testbed.xml b/test/ccg2xml/tiny/tiny-testbed.xml new file mode 100644 index 0000000..6f57d8a --- /dev/null +++ 
b/test/ccg2xml/tiny/tiny-testbed.xml @@ -0,0 +1,16 @@ + + + + + + + + + + + + + + + + diff --git a/test/ccg2xml/tiny/tiny-types.xml b/test/ccg2xml/tiny/tiny-types.xml new file mode 100644 index 0000000..2ecd2d9 --- /dev/null +++ b/test/ccg2xml/tiny/tiny-types.xml @@ -0,0 +1,64 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/test/ccg2xml/tinytiny/tinytiny-grammar.xml b/test/ccg2xml/tinytiny/tinytiny-grammar.xml new file mode 100644 index 0000000..ccdc920 --- /dev/null +++ b/test/ccg2xml/tinytiny/tinytiny-grammar.xml @@ -0,0 +1,7 @@ + + + + + + + diff --git a/test/ccg2xml/tinytiny/tinytiny-lexicon.xml b/test/ccg2xml/tinytiny/tinytiny-lexicon.xml new file mode 100644 index 0000000..e2c2e6e --- /dev/null +++ b/test/ccg2xml/tinytiny/tinytiny-lexicon.xml @@ -0,0 +1,227 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/test/ccg2xml/tinytiny/tinytiny-morph.xml b/test/ccg2xml/tinytiny/tinytiny-morph.xml new file mode 100644 index 0000000..7f86378 --- /dev/null +++ b/test/ccg2xml/tinytiny/tinytiny-morph.xml @@ -0,0 +1,116 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/test/ccg2xml/tinytiny/tinytiny-rules.xml b/test/ccg2xml/tinytiny/tinytiny-rules.xml new file mode 100644 index 0000000..4e835c2 --- /dev/null +++ b/test/ccg2xml/tinytiny/tinytiny-rules.xml @@ -0,0 +1,15 @@ + + + + + + + + + + + + + + + diff --git a/test/ccg2xml/tinytiny/tinytiny-testbed.xml b/test/ccg2xml/tinytiny/tinytiny-testbed.xml new file mode 100644 index 0000000..daf5d4b --- /dev/null +++ b/test/ccg2xml/tinytiny/tinytiny-testbed.xml @@ -0,0 +1,16 @@ + + + + + + + + + + + + + + + + diff --git a/test/ccg2xml/tinytiny/tinytiny-types.xml b/test/ccg2xml/tinytiny/tinytiny-types.xml new file mode 100644 index 0000000..2f9408c --- /dev/null +++ b/test/ccg2xml/tinytiny/tinytiny-types.xml @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
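For orientation: in the OpenCCG layout that ccg2xml targets, each seven-line *-grammar.xml added above is a small manifest that points at the sibling lexicon, morphology, rules, types, and testbed files for that test grammar. Since the actual file bodies were stripped from this diff, the following is only a hedged sketch of what a file like tiny-grammar.xml plausibly contains, assuming the conventional OpenCCG element and attribute names; it is an illustration, not the committed content:

    <?xml version="1.0" encoding="UTF-8"?>
    <!-- Hypothetical reconstruction; the real tiny-grammar.xml body is not
         recoverable from this diff. Element/attribute names follow the usual
         OpenCCG grammar-manifest convention. -->
    <grammar name="tiny">
      <lexicon file="tiny-lexicon.xml"/>      <!-- lexical categories -->
      <morphology file="tiny-morph.xml"/>     <!-- word forms and macros -->
      <rules file="tiny-rules.xml"/>          <!-- combinatory rules -->
      <types file="tiny-types.xml"/>          <!-- type hierarchy -->
      <testbed file="tiny-testbed.xml"/>      <!-- regression sentences -->
    </grammar>

The same manifest-plus-satellite-files pattern repeats for the grammar_template, inherit, and tinytiny test grammars above, which is why each directory contributes the same six file names with its own prefix.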